[pytorch] add matmul sample
Avimitin committed Aug 8, 2024
1 parent e196ba7 commit e0cd867
Showing 3 changed files with 95 additions and 0 deletions.
30 changes: 30 additions & 0 deletions tests/pytorch/matmul/config.nix
@@ -0,0 +1,30 @@
{
  includes = [
    ../memref.h
  ];

  buddyOptArgs = [
    [
      "--pass-pipeline"
      "builtin.module(func.func(tosa-to-linalg-named, tosa-to-linalg, tosa-to-tensor, tosa-to-arith), empty-tensor-to-alloc-tensor, convert-elementwise-to-linalg, arith-bufferize, func.func(linalg-bufferize, tensor-bufferize), func-bufferize)"
    ]
    [
      "--pass-pipeline"
      "builtin.module(func.func(buffer-deallocation-simplification, convert-linalg-to-loops), eliminate-empty-tensors, func.func(llvm-request-c-wrappers))"
    ]
    [
      "--lower-affine"
      "--convert-math-to-llvm"
      "--convert-math-to-libm"
      "--convert-scf-to-cf"
      "--convert-arith-to-llvm"
      "--expand-strided-metadata"
      "--finalize-memref-to-llvm"
      "--lower-vector-exp"
      "--lower-rvv=rv32"
      "--convert-vector-to-llvm"
      "--convert-func-to-llvm"
      "--reconcile-unrealized-casts"
    ]
  ];
}
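
Each inner list in buddyOptArgs presumably becomes one buddy-opt invocation, run in sequence: the first pipeline lowers TOSA to linalg and bufferizes, the second lowers linalg to loops and requests C wrappers, and the third finishes lowering to the LLVM dialect (including the RVV-specific --lower-rvv=rv32 step). A minimal Python driver sketch, assuming a buddy-opt binary on PATH and a forward.mlir input — the actual wiring lives in the surrounding test harness, not in this commit:

import subprocess

# Hypothetical driver: apply the three buddy-opt stages from config.nix
# in sequence, each reading the previous stage's output file.
STAGES = [
    ["--pass-pipeline",
     "builtin.module(func.func(tosa-to-linalg-named, tosa-to-linalg, "
     "tosa-to-tensor, tosa-to-arith), empty-tensor-to-alloc-tensor, "
     "convert-elementwise-to-linalg, arith-bufferize, "
     "func.func(linalg-bufferize, tensor-bufferize), func-bufferize)"],
    ["--pass-pipeline",
     "builtin.module(func.func(buffer-deallocation-simplification, "
     "convert-linalg-to-loops), eliminate-empty-tensors, "
     "func.func(llvm-request-c-wrappers))"],
    ["--lower-affine", "--convert-math-to-llvm", "--convert-math-to-libm",
     "--convert-scf-to-cf", "--convert-arith-to-llvm",
     "--expand-strided-metadata", "--finalize-memref-to-llvm",
     "--lower-vector-exp", "--lower-rvv=rv32",
     "--convert-vector-to-llvm", "--convert-func-to-llvm",
     "--reconcile-unrealized-casts"],
]

src = "forward.mlir"
for i, args in enumerate(STAGES):
    dst = f"forward.{i}.mlir"  # intermediate file names are illustrative
    subprocess.run(["buddy-opt", src, *args, "-o", dst], check=True)
    src = dst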
39 changes: 39 additions & 0 deletions tests/pytorch/matmul/matmul.c
@@ -0,0 +1,39 @@
#include "memref.h"
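
/* NEW_MEMREF(float, 3) below presumably expands to the definition of
 * struct MemRef_float_dim3: MLIR's C-interface memref descriptor with
 * allocated/aligned pointers, an offset, and per-dimension sizes and
 * strides, matching the designated initializers used in this file. */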

NEW_MEMREF(float, 3);

extern void _mlir_ciface_forward(struct MemRef_float_dim3 *output,
                                 struct MemRef_float_dim3 *arg1,
                                 struct MemRef_float_dim3 *arg2);

__attribute__((section(".vdata"))) float input_float_0[64][32][2];
struct MemRef_float_dim3 input1 = {
    .allocatedPtr = input_float_0,
    .alignedPtr = input_float_0,
    .offset = 0,
    .sizes = {64, 32, 2},
    .strides = {2 * 32, 2, 1},
};

__attribute__((section(".vdata"))) float input_float_1[64][2][8];
struct MemRef_float_dim3 input2 = {
    .allocatedPtr = input_float_1,
    .alignedPtr = input_float_1,
    .offset = 0,
    .sizes = {64, 2, 8},
    .strides = {2 * 8, 8, 1},
};

__attribute__((section(".vdata"))) float output_float_0[64][32][8];
struct MemRef_float_dim3 output = {
    .allocatedPtr = output_float_0,
    .alignedPtr = output_float_0,
    .offset = 0,
    .sizes = {64, 32, 8},
    .strides = {32 * 8, 8, 1},
};

int test() {
  _mlir_ciface_forward(&output, &input1, &input2);
  return 0;
}
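
test() only drives the kernel; it does not validate the result. For illustration, a naive reference check of the batched matmul (C[b][i][j] = sum over k of A[b][i][k] * B[b][k][j]) could look like the sketch below. The helper name and the tolerance are assumptions, not part of this commit:

/* Hypothetical check, not part of the commit: recompute the batched
 * matmul naively and compare against the kernel's output buffer. */
static int check_result(void) {
  for (int b = 0; b < 64; b++)
    for (int i = 0; i < 32; i++)
      for (int j = 0; j < 8; j++) {
        float expected = 0.0f;
        for (int k = 0; k < 2; k++)
          expected += input_float_0[b][i][k] * input_float_1[b][k][j];
        float diff = output_float_0[b][i][j] - expected;
        if (diff > 1e-4f || diff < -1e-4f)
          return 1; /* mismatch at (b, i, j) */
      }
  return 0; /* all 64 x 32 x 8 elements match */
}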
26 changes: 26 additions & 0 deletions tests/pytorch/matmul/matmul.py
@@ -0,0 +1,26 @@
import torch
import torch._dynamo as dynamo
from torch._inductor.decomposition import decompositions as inductor_decomp

from buddy.compiler.frontend import DynamoCompiler
from buddy.compiler.ops import tosa

# Define the input data.
float32_in1 = torch.randn(64, 32, 2).to(torch.float32)
float32_in2 = torch.randn(64, 2, 8).to(torch.float32)

# Initialize the dynamo compiler.
dynamo_compiler = DynamoCompiler(
    primary_registry=tosa.ops_registry,
    aot_autograd_decomposition=inductor_decomp,
)

# Pass the function and input data to the Dynamo compiler's importer. The
# importer first builds a graph, then lowers it to a top-level IR (tosa,
# linalg, etc.), and finally returns the generated module together with the
# weight parameters.
graphs = dynamo_compiler.importer(torch.matmul, *(float32_in1, float32_in2))
graph = graphs[0]
graph.lower_to_top_level_ir()

with open("forward.mlir", "w") as mlir_module:
    print(graph._imported_module, file=mlir_module)
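
As a quick sanity check (not part of this commit), eager PyTorch can confirm the output shape that matmul.c hard-codes for its output memref:

# Hypothetical eager-mode check: batched matmul of (64, 32, 2) x (64, 2, 8)
# inputs yields a (64, 32, 8) result, matching the output buffer in matmul.c.
expected = torch.matmul(float32_in1, float32_in2)
assert expected.shape == (64, 32, 8)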
