From ca18bb54b1b25e018549882cc3c646d163d84368 Mon Sep 17 00:00:00 2001
From: Avimitin
Date: Sat, 24 Aug 2024 23:57:04 +0800
Subject: [PATCH] [tests] refactor buddy-mlir E2E test framework

Signed-off-by: Avimitin
---
 tests/pytorch/default.nix     | 163 ++++++++++++++++------------------
 tests/pytorch/demo/build.nix  |  31 +++++++
 tests/pytorch/demo/config.nix |  30 -------
 tests/pytorch/demo/demo.py    |  35 ++++----
 4 files changed, 127 insertions(+), 132 deletions(-)
 create mode 100644 tests/pytorch/demo/build.nix
 delete mode 100644 tests/pytorch/demo/config.nix

diff --git a/tests/pytorch/default.nix b/tests/pytorch/default.nix
index 6b54203d4..dca85dcf9 100644
--- a/tests/pytorch/default.nix
+++ b/tests/pytorch/default.nix
@@ -5,96 +5,87 @@
 , findAndBuild
 , getTestRequiredFeatures
 , t1main
+, callPackage
 }:
 let
-
-  builder = makeBuilder { casePrefix = "mlir"; };
+  builder = makeBuilder { casePrefix = "pytorch"; };
 
   build = { caseName, sourcePath }:
-    let
-      buddyBuildConfig = import (sourcePath + "/config.nix");
-      defaultBuddyTranslateArgs = [ "--buddy-to-llvmir" ];
-      defaultBuddyLLCArgs = [
-        "-mtriple=riscv32"
-        "-target-abi=ilp32f"
-        "-mattr=+m,+f,+zve32f"
-        "-riscv-v-vector-bits-min=128"
-      ];
-    in
-    builder rec {
-      inherit caseName;
-
-      src = sourcePath;
-
-      featuresRequired = getTestRequiredFeatures sourcePath;
-
-      nativeBuildInputs = [ buddy-mlir.pyenv buddy-mlir ];
-
-      pythonArgs = buddyBuildConfig.pythonArgs or [ ];
-      buddyTranslateArgs = buddyBuildConfig.buddyTranslateArgs or defaultBuddyTranslateArgs;
-      buddyLLCArgs = buddyBuildConfig.buddyLLCArgs or defaultBuddyLLCArgs;
-      buddyIncludes = buddyBuildConfig.includes or [ ];
-
-      postUnpack = ''
-        buddyIncludeDir="."
-        if [ "x$buddyIncludes" != "x" ]; then
-          mkdir -p buddyInclude
-          _buddyHeaderArray=( $buddyIncludes )
-          for h in "''${_buddyHeaderArray}"; do
-            cp -v "$h" buddyInclude/"$(stripHash $h)"
-          done
-
-          buddyIncludeDir=$PWD/buddyInclude
-        fi
-      '';
-
-      buildPhase = ''
-        runHook preBuild
-
-        echo "Running python with args $pythonArgs"
-        python $pythonArgs ${caseName}.py
-
-        # Generate multiple buddy-opt call, each will read input from former pipeline
-        # For example, for buddyOptArgs = [ [ "--arg-a" ], [ "--arg-b" ], [ "--arg-c" ] ]
-        # This will generate
-        #
-        #   echo "..."
-        #   buddy-opt forward.mlir --arg-a -o forward-1.mlir
-        #   echo "..."
-        #   buddy-opt forward-1.mlir --arg-b -o forward-2.mlir
-        #   echo "..."
-        #   buddy-opt forward-2.mlir --arg-c -o forward-3.mlir
-        #
-        ${lib.concatStringsSep "\n" (
-          lib.imap0
-            (idx: args: ''
-              echo "Running buddy-opt with args ${lib.escapeShellArgs args}"
-              buddy-opt \
-                forward${if idx == 0 then "" else "-${toString idx}"}.mlir \
-                ${lib.escapeShellArgs args} \
-                -o forward-${toString (idx+1)}.mlir
-            '')
-            buddyBuildConfig.buddyOptArgs
-        )}
-
-        # Pick up the last optimized MLIR file
-        echo "Running buddy-translate with args $buddyTranslateArgs"
-        buddy-translate forward-${with builtins; toString (length buddyBuildConfig.buddyOptArgs)}.mlir \
-          $buddyTranslateArgs -o forward.ll
-
-        echo "Running buddy-llc with args $buddyLLCArgs"
-        buddy-llc forward.ll $buddyLLCArgs --filetype=obj -o forward.o
-
-        echo "Using include dir $buddyIncludeDir"
-        $CXX -nostdlib -I$buddyIncludeDir -c ${caseName}.cc -o host.o
-        $CC -T${linkerScript} \
-          host.o forward.o ${t1main} \
-          -o $pname.elf
-
-        runHook postBuild
-      '';
-
-      meta.description = "testcase '${caseName}', written in MLIR";
+    callPackage (sourcePath + "/build.nix") {
+      buildBuddyE2ETest = { optPhase, ... }@overrides: builder
+        ({
+          inherit caseName;
+          configurePhase = ''
+            declare -a optArtifacts translateArtifacts llcArtifacts
+          '';
+
+          featuresRequired = getTestRequiredFeatures sourcePath;
+
+          nativeBuildInputs = [ buddy-mlir.pyenv buddy-mlir ];
+
+          src = sourcePath;
+
+          translatePhase = ''
+            if [[ -z "$optArtifacts" ]]; then
+              echo "optPhase did not produce any optArtifacts, aborting" >&2
+              exit 1
+            fi
+
+            for mlir in "''${optArtifacts[@]}"; do
+              echo "Translating $mlir"
+              buddy-translate --buddy-to-llvmir "$mlir" -o "$mlir.ll"
+
+              translateArtifacts+=("$mlir.ll")
+            done
+          '';
+
+          llcPhase = ''
+            if [[ -z "$translateArtifacts" ]]; then
+              echo "translatePhase did not produce any translateArtifacts, aborting" >&2
+              exit 1
+            fi
+
+            for llvmir in "''${translateArtifacts[@]}"; do
+              echo "Compiling $llvmir"
+              buddy-llc "$llvmir" \
+                -mtriple=riscv32 \
+                -target-abi=ilp32f \
+                -mattr=+m,+f,+zve32f \
+                -riscv-v-vector-bits-min=128 \
+                --filetype=obj \
+                -o "$llvmir.o"
+
+              llcArtifacts+=("$llvmir.o")
+            done
+          '';
+
+          linkPhase = ''
+            if [[ -z "$llcArtifacts" ]]; then
+              echo "llcPhase did not produce any llcArtifacts, aborting" >&2
+              exit 1
+            fi
+
+            echo "Building final binary"
+            mkdir -p _include
+            cp ${./memref.hpp} _include/memref.hpp
+
+            $CXX -nostdlib -I _include -c ${caseName}.cc -o host.o
+            $CC -T${linkerScript} \
+              host.o "''${llcArtifacts[@]}" ${t1main} \
+              -o $pname.elf
+          '';
+
+          buildPhase = ''
+            runHook preBuild
+
+            runPhase optPhase
+            runPhase translatePhase
+            runPhase llcPhase
+            runPhase linkPhase
+
+            runHook postBuild
+          '';
+        } // overrides);
     };
 in
 findAndBuild ./. build
diff --git a/tests/pytorch/demo/build.nix b/tests/pytorch/demo/build.nix
new file mode 100644
index 000000000..f773022fa
--- /dev/null
+++ b/tests/pytorch/demo/build.nix
@@ -0,0 +1,31 @@
+{ buildBuddyE2ETest }:
+buildBuddyE2ETest {
+  caseName = "demo";
+
+  optPhase = ''
+    echo "Lowering MLIR"
+    python ./demo.py \
+      | buddy-opt --pass-pipeline "builtin.module(func.func(tosa-to-linalg-named, tosa-to-linalg, tosa-to-tensor, tosa-to-arith),\
+          empty-tensor-to-alloc-tensor, convert-elementwise-to-linalg, arith-bufferize, \
+          func.func(linalg-bufferize, tensor-bufferize), func-bufferize)" \
+      | buddy-opt --pass-pipeline "builtin.module(func.func(buffer-deallocation-simplification, convert-linalg-to-loops), \
+          eliminate-empty-tensors, func.func(llvm-request-c-wrappers))" \
+      | buddy-opt --lower-affine \
+          --convert-math-to-llvm \
+          --convert-math-to-libm \
+          --convert-scf-to-cf \
+          --convert-arith-to-llvm \
+          --expand-strided-metadata \
+          --finalize-memref-to-llvm \
+          --lower-vector-exp \
+          --lower-rvv=rv32 \
+          --convert-vector-to-llvm \
+          --convert-func-to-llvm \
+          --reconcile-unrealized-casts \
+          -o forward-lowered.mlir
+
+    optArtifacts+=(
+      "forward-lowered.mlir"
+    )
+  '';
+}
diff --git a/tests/pytorch/demo/config.nix b/tests/pytorch/demo/config.nix
deleted file mode 100644
index 0a97e1a09..000000000
--- a/tests/pytorch/demo/config.nix
+++ /dev/null
@@ -1,30 +0,0 @@
-{
-  includes = [
-    ../memref.hpp
-  ];
-
-  buddyOptArgs = [
-    [
-      "--pass-pipeline"
-      "builtin.module(func.func(tosa-to-linalg-named, tosa-to-linalg, tosa-to-tensor, tosa-to-arith), empty-tensor-to-alloc-tensor, convert-elementwise-to-linalg, arith-bufferize, func.func(linalg-bufferize, tensor-bufferize), func-bufferize)"
-    ]
-    [
-      "--pass-pipeline"
-      "builtin.module(func.func(buffer-deallocation-simplification, convert-linalg-to-loops), eliminate-empty-tensors, func.func(llvm-request-c-wrappers))"
-    ]
-    [
-      "--lower-affine"
-      "--convert-math-to-llvm"
-      "--convert-math-to-libm"
-      "--convert-scf-to-cf"
-      "--convert-arith-to-llvm"
-      "--expand-strided-metadata"
-      "--finalize-memref-to-llvm"
-      "--lower-vector-exp"
-      "--lower-rvv=rv32"
-      "--convert-vector-to-llvm"
-      "--convert-func-to-llvm"
-      "--reconcile-unrealized-casts"
-    ]
-  ];
-}
diff --git a/tests/pytorch/demo/demo.py b/tests/pytorch/demo/demo.py
index 650227eca..a0d38c840 100644
--- a/tests/pytorch/demo/demo.py
+++ b/tests/pytorch/demo/demo.py
@@ -9,22 +9,25 @@ def foo(x, y):
     return x * y + x
 
 
-# Define the input data.
-float32_in1 = torch.randn(512).to(torch.float32)
-float32_in2 = torch.randn(512).to(torch.float32)
+def main():
+    # Define the input data.
+    float32_in1 = torch.randn(512).to(torch.float32)
+    float32_in2 = torch.randn(512).to(torch.float32)
 
-# Initialize the dynamo compiler.
-dynamo_compiler = DynamoCompiler(
-    primary_registry=tosa.ops_registry,
-    aot_autograd_decomposition=inductor_decomp,
-)
+    # Initialize the dynamo compiler.
+    dynamo_compiler = DynamoCompiler(
+        primary_registry=tosa.ops_registry,
+        aot_autograd_decomposition=inductor_decomp,
+    )
 
-# Pass the function and input data to the dynamo compiler's importer, the
-# importer will first build a graph. Then, lower the graph to top-level IR.
-# (tosa, linalg, etc.). Finally, accepts the generated module and weight parameters.
-graphs = dynamo_compiler.importer(foo, *(float32_in1, float32_in2))
-graph = graphs[0]
-graph.lower_to_top_level_ir()
+    # Pass the function and input data to the dynamo compiler's importer. The
+    # importer first builds a graph, then lowers it to top-level IR (tosa,
+    # linalg, etc.) and finally yields the generated module and weight parameters.
+    graphs = dynamo_compiler.importer(foo, *(float32_in1, float32_in2))
+    graph = graphs[0]
+    graph.lower_to_top_level_ir()
 
-with open("forward.mlir", "w") as mlir_module:
-    print(graph._imported_module, file = mlir_module)
+    print(graph._imported_module)
+
+if __name__ == "__main__":
+    main()