[tests] add pytorch.matmul
Signed-off-by: Avimitin <[email protected]>
Avimitin authored and sequencer committed Aug 25, 2024
1 parent bb1e81c commit 2357c6c
Showing 4 changed files with 88 additions and 1 deletion.
4 changes: 3 additions & 1 deletion .github/cases/blastoise/default.json
@@ -1,5 +1,7 @@
{
"pytorch.demo": -1,
"pytorch.lenet": -1,
"pytorch.matmul": -1,
"mlir.rvv_vp_intrinsic_add": 436,
"mlir.rvv_vp_intrinsic_add_scalable": 584,
"mlir.hello": 146,
@@ -511,4 +513,4 @@
"rvv_bench.poly1305": 4,
"rvv_bench.strlen": 877754,
"rvv_bench.utf8_count": 6340466
}
}
39 changes: 39 additions & 0 deletions tests/pytorch/matmul/build.nix
@@ -0,0 +1,39 @@
{ buildBuddyE2ETest }:
buildBuddyE2ETest {
  caseName = "matmul";

  optPhase = ''
    echo "Lowering forward.mlir"
    python ./matmul.py \
      | buddy-opt --pass-pipeline "builtin.module(func.func(tosa-to-linalg-named, tosa-to-arith, tosa-to-linalg, tosa-to-tensor))" \
      | buddy-opt --convert-elementwise-to-linalg \
          --func-bufferize-dynamic-offset \
          --arith-bufferize \
          --func-bufferize \
          --tensor-bufferize \
          --linalg-bufferize \
          --finalizing-bufferize \
          --batchmatmul-optimize \
          --convert-linalg-to-affine-loops \
          --lower-affine \
          --lower-vector-exp \
          --lower-rvv=rv32 \
          --convert-vector-to-scf \
          --convert-scf-to-cf \
          --llvm-request-c-wrappers \
          --convert-vector-to-llvm \
          --convert-math-to-llvm \
          --convert-math-to-libm \
          --convert-arith-to-llvm \
          --convert-func-to-llvm \
          --expand-strided-metadata \
          --finalize-memref-to-llvm \
          --reconcile-unrealized-casts \
          -o forward-lowered.mlir
    optArtifacts+=(
      "forward-lowered.mlir"
    )
  '';
}
22 changes: 22 additions & 0 deletions tests/pytorch/matmul/matmul.cc
@@ -0,0 +1,22 @@
#include "memref.hpp"

extern "C" void _mlir_ciface_forward(MemRef<float, 1> *output,
MemRef<float, 1> *arg1,
MemRef<float, 1> *arg2);

// Three dimensions of size 8 each: 8 x 8 x 8 = 512 elements per buffer
static const int32_t sizes[3] = {8, 8, 8};

__attribute((section(".vdata"))) float input_float_1[512];
MemRef<float, 1> input1(input_float_1, sizes);

__attribute((section(".vdata"))) float input_float_2[512];
MemRef<float, 1> input2(input_float_2, sizes);

__attribute((section(".vdata"))) float output_float_1[512];
MemRef<float, 1> output(output_float_1, sizes);

extern "C" int test() {
_mlir_ciface_forward(&output, &input1, &input2);
return 0;
}
24 changes: 24 additions & 0 deletions tests/pytorch/matmul/matmul.py
@@ -0,0 +1,24 @@
import torch
import torch._dynamo as dynamo
from torch._inductor.decomposition import decompositions as inductor_decomp

from buddy.compiler.frontend import DynamoCompiler
from buddy.compiler.ops import tosa

def main():
    float32_in1 = torch.randn(8, 8, 8).to(torch.float32)
    float32_in2 = torch.randn(8, 8, 8).to(torch.float32)

    dynamo_compiler = DynamoCompiler(
        primary_registry=tosa.ops_registry,
        aot_autograd_decomposition=inductor_decomp,
    )

    graphs = dynamo_compiler.importer(torch.matmul, *(float32_in1, float32_in2))
    graph = graphs[0]
    graph.lower_to_top_level_ir()

    print(graph._imported_module)

if __name__ == "__main__":
    main()
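
For reference, a minimal PyTorch sketch (not part of this commit) of the shape relationship the new test relies on: torch.matmul on two (8, 8, 8) float32 tensors performs a batched matrix multiply, which is why each .vdata buffer in matmul.cc holds 8 x 8 x 8 = 512 floats.

import torch

# Sketch only: mirrors the shapes used in matmul.py above.
a = torch.randn(8, 8, 8, dtype=torch.float32)
b = torch.randn(8, 8, 8, dtype=torch.float32)

# torch.matmul on two 3-D tensors is a batched matrix multiply:
# 8 independent (8, 8) @ (8, 8) products.
out = torch.matmul(a, b)

assert out.shape == (8, 8, 8)
assert out.numel() == 512  # matches the 512-float buffers in matmul.cc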
