From 27fc93c67674858fcdea465d05794f0fd015a6ef Mon Sep 17 00:00:00 2001
From: Avimitin
Date: Sun, 25 Aug 2024 02:56:04 +0800
Subject: [PATCH] [tests] add pytorch.matmul

Signed-off-by: Avimitin
---
 .github/cases/blastoise/default.json |  4 ++-
 tests/pytorch/matmul/build.nix       | 39 ++++++++++++++++++++++++++++
 tests/pytorch/matmul/matmul.cc       | 22 ++++++++++++++++
 tests/pytorch/matmul/matmul.py       | 24 +++++++++++++++++
 4 files changed, 88 insertions(+), 1 deletion(-)
 create mode 100644 tests/pytorch/matmul/build.nix
 create mode 100644 tests/pytorch/matmul/matmul.cc
 create mode 100644 tests/pytorch/matmul/matmul.py

diff --git a/.github/cases/blastoise/default.json b/.github/cases/blastoise/default.json
index 316f7d8dc..61d8f3bc1 100644
--- a/.github/cases/blastoise/default.json
+++ b/.github/cases/blastoise/default.json
@@ -1,5 +1,7 @@
 {
   "pytorch.demo": -1,
+  "pytorch.lenet": -1,
+  "pytorch.matmul": -1,
   "mlir.rvv_vp_intrinsic_add": 436,
   "mlir.rvv_vp_intrinsic_add_scalable": 584,
   "mlir.hello": 146,
@@ -511,4 +513,4 @@
   "rvv_bench.poly1305": 4,
   "rvv_bench.strlen": 877754,
   "rvv_bench.utf8_count": 6340466
-}
\ No newline at end of file
+}
diff --git a/tests/pytorch/matmul/build.nix b/tests/pytorch/matmul/build.nix
new file mode 100644
index 000000000..9e84d8219
--- /dev/null
+++ b/tests/pytorch/matmul/build.nix
@@ -0,0 +1,39 @@
+{ buildBuddyE2ETest }:
+buildBuddyE2ETest {
+  caseName = "matmul";
+
+  optPhase = ''
+    echo "Lowering forward.mlir"
+
+    python ./matmul.py \
+      | buddy-opt --pass-pipeline "builtin.module(func.func(tosa-to-linalg-named, tosa-to-arith, tosa-to-linalg, tosa-to-tensor))" \
+      | buddy-opt --convert-elementwise-to-linalg \
+          --func-bufferize-dynamic-offset \
+          --arith-bufferize \
+          --func-bufferize \
+          --tensor-bufferize \
+          --linalg-bufferize \
+          --finalizing-bufferize \
+          --batchmatmul-optimize \
+          --convert-linalg-to-affine-loops \
+          --lower-affine \
+          --lower-vector-exp \
+          --lower-rvv=rv32 \
+          --convert-vector-to-scf \
+          --convert-scf-to-cf \
+          --llvm-request-c-wrappers \
+          --convert-vector-to-llvm \
+          --convert-math-to-llvm \
+          --convert-math-to-libm \
+          --convert-arith-to-llvm \
+          --convert-func-to-llvm \
+          --expand-strided-metadata \
+          --finalize-memref-to-llvm \
+          --reconcile-unrealized-casts \
+          -o forward-lowered.mlir
+
+    optArtifacts+=(
+      "forward-lowered.mlir"
+    )
+  '';
+}
diff --git a/tests/pytorch/matmul/matmul.cc b/tests/pytorch/matmul/matmul.cc
new file mode 100644
index 000000000..b523f0626
--- /dev/null
+++ b/tests/pytorch/matmul/matmul.cc
@@ -0,0 +1,22 @@
+#include "memref.hpp"
+
+extern "C" void _mlir_ciface_forward(MemRef<float, 3> *output,
+                                     MemRef<float, 3> *arg1,
+                                     MemRef<float, 3> *arg2);
+
+// Three-dimensional 8x8x8 tensors, 512 elements each
+static const int32_t sizes[3] = {8, 8, 8};
+
+__attribute((section(".vdata"))) float input_float_1[512];
+MemRef<float, 3> input1(input_float_1, sizes);
+
+__attribute((section(".vdata"))) float input_float_2[512];
+MemRef<float, 3> input2(input_float_2, sizes);
+
+__attribute((section(".vdata"))) float output_float_1[512];
+MemRef<float, 3> output(output_float_1, sizes);
+
+extern "C" int test() {
+  _mlir_ciface_forward(&output, &input1, &input2);
+  return 0;
+}
diff --git a/tests/pytorch/matmul/matmul.py b/tests/pytorch/matmul/matmul.py
new file mode 100644
index 000000000..eb8932ffd
--- /dev/null
+++ b/tests/pytorch/matmul/matmul.py
@@ -0,0 +1,24 @@
+import torch
+import torch._dynamo as dynamo
+from torch._inductor.decomposition import decompositions as inductor_decomp
+
+from buddy.compiler.frontend import DynamoCompiler
+from buddy.compiler.ops import tosa
+
+def main():
+    float32_in1 = torch.randn(8, 8, 8).to(torch.float32)
+    float32_in2 = torch.randn(8, 8, 8).to(torch.float32)
+
+    dynamo_compiler = DynamoCompiler(
+        primary_registry=tosa.ops_registry,
+        aot_autograd_decomposition=inductor_decomp,
+    )
+
+    graphs = dynamo_compiler.importer(torch.matmul, *(float32_in1, float32_in2))
+    graph = graphs[0]
+    graph.lower_to_top_level_ir()
+
+    print(graph._imported_module)
+
+if __name__ == "__main__":
+    main()
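
Note (reviewer sketch, not part of the patch): torch.matmul on two 8x8x8
inputs is a batched matrix multiply, i.e. eight independent 8x8 @ 8x8
products. A minimal plain-PyTorch sketch of the computation the lowered
forward kernel is expected to perform; tensor names here are illustrative
only:

    import torch

    a = torch.randn(8, 8, 8, dtype=torch.float32)
    b = torch.randn(8, 8, 8, dtype=torch.float32)

    # On 3-D inputs, torch.matmul batches over the leading dimension:
    # out[i] = a[i] @ b[i] for each of the 8 batches.
    out = torch.matmul(a, b)
    assert out.shape == (8, 8, 8)

    # Cross-check against an explicit per-batch loop.
    ref = torch.stack([a[i] @ b[i] for i in range(8)])
    assert torch.allclose(out, ref)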