From 9372e07868f3244dfe4b579601adddc19b2f8903 Mon Sep 17 00:00:00 2001 From: Muhammad Asif Manzoor Date: Tue, 5 Nov 2024 01:54:05 +0000 Subject: [PATCH] Stable HLO runtime tests --- .../Silicon/StableHLO/Binary/add_op.mlir | 12 ++++++ .../Silicon/StableHLO/Binary/compare_op.mlir | 37 ++++++++++++++++ .../Silicon/StableHLO/Binary/concat_op.mlir | 43 +++++++++++++++++++ .../Silicon/StableHLO/Binary/divide_op.mlir | 12 ++++++ .../Silicon/StableHLO/Binary/logical_op.mlir | 17 ++++++++ .../Silicon/StableHLO/Binary/maximum_op.mlir | 12 ++++++ .../Silicon/StableHLO/Binary/minimum_op.mlir | 12 ++++++ .../Silicon/StableHLO/Binary/multiply_op.mlir | 12 ++++++ .../StableHLO/Binary/remainder_op.mlir | 12 ++++++ .../Silicon/StableHLO/Binary/subtract_op.mlir | 12 ++++++ .../StableHLO/Constant/constant_bf16.mlir | 22 ++++++++++ .../StableHLO/Constant/constant_bool.mlir | 17 ++++++++ .../StableHLO/Constant/constant_f32.mlir | 22 ++++++++++ .../StableHLO/Constant/constant_i16.mlir | 22 ++++++++++ .../StableHLO/Constant/constant_i32.mlir | 22 ++++++++++ .../StableHLO/Constant/constant_i64.mlir | 22 ++++++++++ .../Silicon/StableHLO/Unary/absolute_op.mlir | 12 ++++++ .../Silicon/StableHLO/Unary/cbrt_op.mlir | 12 ++++++ .../Silicon/StableHLO/Unary/ceil_op.mlir | 12 ++++++ .../Silicon/StableHLO/Unary/cosine_op.mlir | 12 ++++++ .../Unary/exponential_minus_one_op.mlir | 12 ++++++ .../StableHLO/Unary/exponential_op.mlir | 12 ++++++ .../Silicon/StableHLO/Unary/floor_op.mlir | 12 ++++++ .../Silicon/StableHLO/Unary/isfinite_op.mlir | 12 ++++++ .../StableHLO/Unary/log_plus_one_op.mlir | 12 ++++++ .../Silicon/StableHLO/Unary/logical_op.mlir | 12 ++++++ .../Silicon/StableHLO/Unary/negate_op.mlir | 12 ++++++ .../Silicon/StableHLO/Unary/rsqrt_op.mlir | 12 ++++++ .../Silicon/StableHLO/Unary/sine_op.mlir | 12 ++++++ .../Silicon/StableHLO/Unary/sqrt_op.mlir | 12 ++++++ .../Silicon/StableHLO/Unary/tranpose_op.mlir | 12 ++++++ .../Silicon/StableHLO/broadcast_op.mlir | 13 ++++++ 
.../Silicon/StableHLO/composite_op.mlir | 19 ++++++++ test/ttmlir/Silicon/StableHLO/conv2d_op.mlir | 22 ++++++++++ test/ttmlir/Silicon/StableHLO/convert_op.mlir | 21 +++++++++ .../Silicon/StableHLO/dot_general_op.mlir | 12 ++++++ .../StableHLO/get_dimension_size_op.mlir | 12 ++++++ .../Silicon/StableHLO/maxpool2d_op.mlir | 17 ++++++++ .../Silicon/StableHLO/mnist_inference.mlir | 41 ++++++++++++++++++ .../Silicon/StableHLO/reduce_add_op.mlir | 12 ++++++ .../Silicon/StableHLO/reduce_maximum_op.mlir | 12 ++++++ test/ttmlir/Silicon/StableHLO/rehsape_op.mlir | 12 ++++++ .../Silicon/StableHLO/scalar_add_op.mlir | 12 ++++++ test/ttmlir/Silicon/StableHLO/sign_op.mlir | 12 ++++++ test/ttmlir/Silicon/StableHLO/slice_op.mlir | 16 +++++++ 45 files changed, 721 insertions(+) create mode 100644 test/ttmlir/Silicon/StableHLO/Binary/add_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Binary/compare_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Binary/concat_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Binary/divide_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Binary/logical_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Binary/maximum_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Binary/minimum_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Binary/multiply_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Binary/remainder_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Binary/subtract_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Constant/constant_bf16.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Constant/constant_bool.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Constant/constant_f32.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Constant/constant_i16.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Constant/constant_i32.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Constant/constant_i64.mlir create mode 100644 
test/ttmlir/Silicon/StableHLO/Unary/absolute_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/cbrt_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/ceil_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/cosine_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/exponential_minus_one_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/exponential_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/floor_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/isfinite_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/log_plus_one_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/logical_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/negate_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/rsqrt_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/sine_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/sqrt_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/Unary/tranpose_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/broadcast_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/composite_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/conv2d_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/convert_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/dot_general_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/get_dimension_size_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/maxpool2d_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/mnist_inference.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/reduce_add_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/reduce_maximum_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/rehsape_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/scalar_add_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/sign_op.mlir create mode 100644 test/ttmlir/Silicon/StableHLO/slice_op.mlir 
diff --git a/test/ttmlir/Silicon/StableHLO/Binary/add_op.mlir b/test/ttmlir/Silicon/StableHLO/Binary/add_op.mlir new file mode 100644 index 000000000..e31d97eb6 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Binary/add_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_add attributes {} { + func.func public @test_add(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.add %arg0, %arg1 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Binary/compare_op.mlir b/test/ttmlir/Silicon/StableHLO/Binary/compare_op.mlir new file mode 100644 index 000000000..e9716401d --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Binary/compare_op.mlir @@ -0,0 +1,37 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_compare attributes {} { + func.func public @test_eq(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> { + %0 = stablehlo.compare EQ, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1> + return %0 : tensor<64x128xi1> + } + + func.func public @test_ne(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> { + %0 = stablehlo.compare NE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1> + return %0 : tensor<64x128xi1> + } + + func.func public @test_ge(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> { + %0 = stablehlo.compare GE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> 
tensor<64x128xi1> + return %0 : tensor<64x128xi1> + } + + func.func public @test_gt(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> { + %0 = stablehlo.compare GT, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1> + return %0 : tensor<64x128xi1> + } + + func.func public @test_le(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> { + %0 = stablehlo.compare LE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1> + return %0 : tensor<64x128xi1> + } + + func.func public @test_lt(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> { + %0 = stablehlo.compare LT, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1> + return %0 : tensor<64x128xi1> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Binary/concat_op.mlir b/test/ttmlir/Silicon/StableHLO/Binary/concat_op.mlir new file mode 100644 index 000000000..c7c17fbac --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Binary/concat_op.mlir @@ -0,0 +1,43 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_concat attributes {} { + func.func public @test_concat_0(%arg0: tensor<32x32xf32>, %arg1: tensor<64x32xf32>) -> tensor<96x32xf32> { + %0 = "stablehlo.concatenate"(%arg0, %arg1) { + dimension = 0 : i64 + } : (tensor<32x32xf32>, tensor<64x32xf32>) -> tensor<96x32xf32> + return %0 : tensor<96x32xf32> + } + + func.func public @test_concat_1(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> { + %0 = "stablehlo.concatenate"(%arg0, %arg1) { + dimension = 1 : i64 + } : (tensor<32x32xf32>, tensor<32x64xf32>) -> tensor<32x96xf32> + return %0 : tensor<32x96xf32> + } + + + func.func public @test_concat_2(%arg0: tensor<128x64xf32>, %arg1: 
tensor<128x96xf32>) -> tensor<128x160xf32> { + %0 = "stablehlo.concatenate"(%arg0, %arg1) { + dimension = 1 : i64 + } : (tensor<128x64xf32>, tensor<128x96xf32>) -> tensor<128x160xf32> + return %0 : tensor<128x160xf32> + } + + func.func public @test_concat_3(%arg0: tensor<64x32xf32>, %arg1: tensor<64x64xf32>) -> tensor<64x96xf32> { + %0 = "stablehlo.concatenate"(%arg0, %arg1) { + dimension = 1 : i64 + } : (tensor<64x32xf32>, tensor<64x64xf32>) -> tensor<64x96xf32> + return %0 : tensor<64x96xf32> + } + + func.func public @test_concat_4(%arg0: tensor<32x32x32x32xf32>, %arg1: tensor<32x32x32x64xf32>) -> tensor<32x32x32x96xf32> { + %0 = "stablehlo.concatenate"(%arg0, %arg1) { + dimension = 3 : i64 + } : (tensor<32x32x32x32xf32>, tensor<32x32x32x64xf32>) -> tensor<32x32x32x96xf32> + return %0 : tensor<32x32x32x96xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Binary/divide_op.mlir b/test/ttmlir/Silicon/StableHLO/Binary/divide_op.mlir new file mode 100644 index 000000000..c85cbf60e --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Binary/divide_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_divice attributes {} { + func.func public @test_divide(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.divide %arg0, %arg1 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Binary/logical_op.mlir b/test/ttmlir/Silicon/StableHLO/Binary/logical_op.mlir new file mode 100644 index 000000000..82e94d687 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Binary/logical_op.mlir @@ -0,0 +1,17 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt 
--ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_compare attributes {} { + func.func public @logical_and(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> { + %0 = stablehlo.and %arg0, %arg1 : tensor<64x128xi1> + return %0 : tensor<64x128xi1> + } + + func.func public @logical_or(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> { + %0 = stablehlo.or %arg0, %arg1 : tensor<64x128xi1> + return %0 : tensor<64x128xi1> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Binary/maximum_op.mlir b/test/ttmlir/Silicon/StableHLO/Binary/maximum_op.mlir new file mode 100644 index 000000000..72e0bd14c --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Binary/maximum_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_maximum attributes {} { + func.func public @test_maximum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.maximum %arg0, %arg1 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Binary/minimum_op.mlir b/test/ttmlir/Silicon/StableHLO/Binary/minimum_op.mlir new file mode 100644 index 000000000..7d7f3cac4 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Binary/minimum_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_minimum attributes {} { + func.func public @test_minimum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> 
tensor<64x128xf32> { + %0 = stablehlo.minimum %arg0, %arg1 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Binary/multiply_op.mlir b/test/ttmlir/Silicon/StableHLO/Binary/multiply_op.mlir new file mode 100644 index 000000000..95460ea6a --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Binary/multiply_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_multiply attributes {} { + func.func public @test_multiply(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { + %0 = stablehlo.multiply %arg0, %arg1 : tensor<13x21x3xf32> + return %0 : tensor<13x21x3xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Binary/remainder_op.mlir b/test/ttmlir/Silicon/StableHLO/Binary/remainder_op.mlir new file mode 100644 index 000000000..67a60c659 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Binary/remainder_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_remainder attributes {} { + func.func public @test_remainder(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.remainder %arg0, %arg1 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Binary/subtract_op.mlir b/test/ttmlir/Silicon/StableHLO/Binary/subtract_op.mlir new file mode 100644 index 000000000..003c11dfb --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Binary/subtract_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// 
RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_subtract attributes {} { + func.func public @test_subtract(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.subtract %arg0, %arg1 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Constant/constant_bf16.mlir b/test/ttmlir/Silicon/StableHLO/Constant/constant_bf16.mlir new file mode 100644 index 000000000..bd438963c --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Constant/constant_bf16.mlir @@ -0,0 +1,22 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_constant attributes {} { + func.func public @test_bfloat16_scalar() -> tensor { + %0 = stablehlo.constant dense<3.0> : tensor + return %0 : tensor + } + + func.func public @test_bfloat16_empty() -> tensor<64x128xbf16> { + %0 = stablehlo.constant dense<0.0> : tensor<64x128xbf16> + return %0 : tensor<64x128xbf16> + } + + func.func public @test_bfloat16_splat() -> tensor<64x128xbf16> { + %0 = stablehlo.constant dense<3.0> : tensor<64x128xbf16> + return %0 : tensor<64x128xbf16> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Constant/constant_bool.mlir b/test/ttmlir/Silicon/StableHLO/Constant/constant_bool.mlir new file mode 100644 index 000000000..8fe2ceefc --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Constant/constant_bool.mlir @@ -0,0 +1,17 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate 
--ttnn-to-flatbuffer > %t.ttnn + +module @jit_constant attributes {} { + func.func public @test_boolean_scalar() -> tensor { + %0 = stablehlo.constant dense : tensor + return %0 : tensor + } + + func.func public @test_boolean_splat() -> tensor<64xi1> { + %0 = stablehlo.constant dense : tensor<64xi1> + return %0 : tensor<64xi1> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Constant/constant_f32.mlir b/test/ttmlir/Silicon/StableHLO/Constant/constant_f32.mlir new file mode 100644 index 000000000..09ce2f18a --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Constant/constant_f32.mlir @@ -0,0 +1,22 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_constant attributes {} { + func.func public @test_float_scalar() -> tensor { + %0 = stablehlo.constant dense<0.3> : tensor + return %0 : tensor + } + + func.func public @test_float_empty() -> tensor<64xf32> { + %0 = stablehlo.constant dense<0.0> : tensor<64xf32> + return %0 : tensor<64xf32> + } + + func.func public @test_float_splat() -> tensor<64xf32> { + %0 = stablehlo.constant dense<0.3> : tensor<64xf32> + return %0 : tensor<64xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Constant/constant_i16.mlir b/test/ttmlir/Silicon/StableHLO/Constant/constant_i16.mlir new file mode 100644 index 000000000..1b757844d --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Constant/constant_i16.mlir @@ -0,0 +1,22 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_constant attributes {} { + func.func public @test_int32_scalar() -> tensor { + %0 = stablehlo.constant dense<3> : tensor + return %0 : 
tensor + } + + func.func public @test_int32_empty() -> tensor<64x128xi16> { + %0 = stablehlo.constant dense<0> : tensor<64x128xi16> + return %0 : tensor<64x128xi16> + } + + func.func public @test_int32_splat() -> tensor<64x128xi16> { + %0 = stablehlo.constant dense<3> : tensor<64x128xi16> + return %0 : tensor<64x128xi16> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Constant/constant_i32.mlir b/test/ttmlir/Silicon/StableHLO/Constant/constant_i32.mlir new file mode 100644 index 000000000..db88dc2d9 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Constant/constant_i32.mlir @@ -0,0 +1,22 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_constant attributes {} { + func.func public @test_int32_scalar() -> tensor { + %0 = stablehlo.constant dense<3> : tensor + return %0 : tensor + } + + func.func public @test_int32_empty() -> tensor<64x128xi32> { + %0 = stablehlo.constant dense<0> : tensor<64x128xi32> + return %0 : tensor<64x128xi32> + } + + func.func public @test_int32_splat() -> tensor<64x128xi32> { + %0 = stablehlo.constant dense<3> : tensor<64x128xi32> + return %0 : tensor<64x128xi32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Constant/constant_i64.mlir b/test/ttmlir/Silicon/StableHLO/Constant/constant_i64.mlir new file mode 100644 index 000000000..ff1ece580 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Constant/constant_i64.mlir @@ -0,0 +1,22 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_constant attributes {} { + func.func public @test_int64_scalar() -> tensor { + %0 = stablehlo.constant dense<3> : tensor + 
return %0 : tensor + } + + func.func public @test_int64_empty() -> tensor<64x128xi64> { + %0 = stablehlo.constant dense<0> : tensor<64x128xi64> + return %0 : tensor<64x128xi64> + } + + func.func public @test_int64_splat() -> tensor<64x128xi64> { + %0 = stablehlo.constant dense<3> : tensor<64x128xi64> + return %0 : tensor<64x128xi64> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/absolute_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/absolute_op.mlir new file mode 100644 index 000000000..81a3c70d8 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Unary/absolute_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_abs attributes {} { + func.func public @test_abs(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.abs %arg0 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/cbrt_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/cbrt_op.mlir new file mode 100644 index 000000000..3afff418a --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Unary/cbrt_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_rsqrt attributes {} { + func.func public @test_cbrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.cbrt %arg0 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/ceil_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/ceil_op.mlir new file mode 100644 index 000000000..fb6a7ef37 --- /dev/null +++ 
b/test/ttmlir/Silicon/StableHLO/Unary/ceil_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_ceil attributes {} { + func.func public @test_ceil(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.ceil %arg0 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/cosine_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/cosine_op.mlir new file mode 100644 index 000000000..8b90d1a7d --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Unary/cosine_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_cosine attributes {} { + func.func public @test_cosine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.cosine %arg0 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/exponential_minus_one_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/exponential_minus_one_op.mlir new file mode 100644 index 000000000..5ff8e6865 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Unary/exponential_minus_one_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_expm1 attributes {} { + func.func public @test_expm1(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.exponential_minus_one %arg0 : 
tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/exponential_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/exponential_op.mlir new file mode 100644 index 000000000..cfa263c71 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Unary/exponential_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_exp attributes {} { + func.func public @test_exp(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.exponential %arg0 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/floor_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/floor_op.mlir new file mode 100644 index 000000000..d0614341f --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Unary/floor_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_floor attributes {} { + func.func public @test_floor(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.floor %arg0 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/isfinite_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/isfinite_op.mlir new file mode 100644 index 000000000..0de6e7ee9 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Unary/isfinite_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: 
ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_isfinite attributes {} { + func.func public @test_isfinite(%arg0: tensor<64x128xf32>) -> tensor<64x128xi1> { + %0 = stablehlo.is_finite %arg0 : (tensor<64x128xf32>) -> tensor<64x128xi1> + return %0 : tensor<64x128xi1> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/log_plus_one_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/log_plus_one_op.mlir new file mode 100644 index 000000000..a77ed68e8 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Unary/log_plus_one_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_log_plus_one attributes {} { + func.func public @test_log_plus_one(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.log_plus_one %arg0 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/logical_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/logical_op.mlir new file mode 100644 index 000000000..575f9ea34 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Unary/logical_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_compare attributes {} { + func.func public @logical_not(%arg0: tensor<64x128xi1>) -> tensor<64x128xi1> { + %0 = stablehlo.not %arg0 : tensor<64x128xi1> + return %0 : tensor<64x128xi1> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/negate_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/negate_op.mlir new file mode 100644 index 000000000..68ece06ea --- /dev/null +++ 
b/test/ttmlir/Silicon/StableHLO/Unary/negate_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_neg attributes {} { + func.func public @test_neg(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.negate %arg0 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/rsqrt_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/rsqrt_op.mlir new file mode 100644 index 000000000..f9fdc321a --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Unary/rsqrt_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_rsqrt attributes {} { + func.func public @test_rsqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.rsqrt %arg0 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/sine_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/sine_op.mlir new file mode 100644 index 000000000..19bcec2d4 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Unary/sine_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_sine attributes {} { + func.func public @test_sine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.sine %arg0 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git 
a/test/ttmlir/Silicon/StableHLO/Unary/sqrt_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/sqrt_op.mlir new file mode 100644 index 000000000..798280d65 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Unary/sqrt_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_sqrt attributes {} { + func.func public @test_sqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.sqrt %arg0 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/Unary/tranpose_op.mlir b/test/ttmlir/Silicon/StableHLO/Unary/tranpose_op.mlir new file mode 100644 index 000000000..2314dae90 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/Unary/tranpose_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_transpose attributes {} { + func.func public @test_transpose(%arg0: tensor<64x128xf32>) -> tensor<128x64xf32> { + %0 = stablehlo.transpose %arg0, dims = [1,0] : (tensor<64x128xf32>) -> tensor<128x64xf32> + return %0 : tensor<128x64xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/broadcast_op.mlir b/test/ttmlir/Silicon/StableHLO/broadcast_op.mlir new file mode 100644 index 000000000..928bcbba4 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/broadcast_op.mlir @@ -0,0 +1,13 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module 
@jit_broadcast attributes {mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} { + func.func public @main(%arg0: tensor<1xf32> {mhlo.layout_mode = "default"}, %arg1: tensor<64x128xf32> {mhlo.layout_mode = "default"}) -> (tensor<64x128xf32> {jax.result_info = "", mhlo.layout_mode = "default"}) { + %0 = stablehlo.broadcast_in_dim %arg0, dims = [1] : (tensor<1xf32>) -> tensor<64x128xf32> + %1 = stablehlo.maximum %0, %arg1 : tensor<64x128xf32> + return %1 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/composite_op.mlir b/test/ttmlir/Silicon/StableHLO/composite_op.mlir new file mode 100644 index 000000000..a44ce1e65 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/composite_op.mlir @@ -0,0 +1,19 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_add attributes {} { + func.func private @add_impl(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.add %arg0, %arg1 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } + + func.func public @main(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { + %results = stablehlo.composite "jit_eltwise_add.my_add" %arg0, %arg1 { + decomposition = @add_impl + } : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + return %results : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/conv2d_op.mlir b/test/ttmlir/Silicon/StableHLO/conv2d_op.mlir new file mode 100644 index 000000000..a6c0641e0 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/conv2d_op.mlir @@ -0,0 +1,22 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: 
ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn +// UNSUPPORTED: true + +module @jit_convolution attributes {} { + func.func public @test_convolution(%arg0: tensor<1x128x128x32xf32>, %arg1: tensor<64x32x3x3xf32>) -> tensor<1x128x128x64xf32> { + %0 = stablehlo.convolution(%arg0, %arg1) + dim_numbers = [b, 0, 1, f]x[o, i, 0, 1]->[b, 0, 1, f], + window = { + stride = [1, 1], + pad = [[1, 1], [1, 1]], + } { + feature_group_count = 1 : i64, + batch_group_count = 1 : i64, + precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>] + } : (tensor<1x128x128x32xf32>, tensor<64x32x3x3xf32>) -> tensor<1x128x128x64xf32> + return %0 : tensor<1x128x128x64xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/convert_op.mlir b/test/ttmlir/Silicon/StableHLO/convert_op.mlir new file mode 100644 index 000000000..034f783d5 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/convert_op.mlir @@ -0,0 +1,21 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_convert attributes {} { + func.func public @test_convert(%arg0: tensor<64x128xf32>) -> tensor<64x128xbf16> { + %0 = stablehlo.convert %arg0 : (tensor<64x128xf32>) -> tensor<64x128xbf16> + return %0 : tensor<64x128xbf16> + } +} + +module @jit_eltwise_add attributes {} { + func.func public @test_add(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xbf16> { + %0 = stablehlo.convert %arg0 : (tensor<64x128xf32>) -> tensor<64x128xbf16> + %1 = stablehlo.convert %arg1 : (tensor<64x128xf32>) -> tensor<64x128xbf16> + %2 = stablehlo.add %0, %1 : tensor<64x128xbf16> + return %2 : tensor<64x128xbf16> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/dot_general_op.mlir b/test/ttmlir/Silicon/StableHLO/dot_general_op.mlir new file mode 100644 index 000000000..72b6638f3 --- /dev/null +++
b/test/ttmlir/Silicon/StableHLO/dot_general_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_dot_general attributes {} { + func.func public @test_dot_general(%arg0 : tensor<16x32xf32>, %arg1 : tensor<32x8xf32>) -> tensor<16x8xf32> { + %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<16x32xf32>, tensor<32x8xf32>) -> tensor<16x8xf32> + return %0 : tensor<16x8xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/get_dimension_size_op.mlir b/test/ttmlir/Silicon/StableHLO/get_dimension_size_op.mlir new file mode 100644 index 000000000..91f397fee --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/get_dimension_size_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_get_dimension_size attributes {} { + func.func public @test_get_dimension_size(%arg0: tensor<64x128xf32>) -> tensor<i32> { + %0 = stablehlo.get_dimension_size %arg0, dim = 1 : (tensor<64x128xf32>) -> tensor<i32> + return %0 : tensor<i32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/maxpool2d_op.mlir b/test/ttmlir/Silicon/StableHLO/maxpool2d_op.mlir new file mode 100644 index 000000000..c049d1281 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/maxpool2d_op.mlir @@ -0,0 +1,17 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn +// UNSUPPORTED: true + +func.func public @test_maxpool2d(%arg0:
tensor<1x128x128x32xbf16>) -> tensor<1x64x64x32xbf16> { + %0 = stablehlo.constant dense<0xFF80> : tensor<bf16> + %1 = stablehlo.broadcast_in_dim %0, dims = [] : (tensor<bf16>) -> tensor<bf16> + %2 = "stablehlo.reduce_window"(%arg0, %1) <{padding = dense<[[0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi64>, window_dimensions = array<i64: 1, 3, 3, 1>, window_strides = array<i64: 1, 2, 2, 1>}> ({ + ^bb0(%arg2: tensor<bf16>, %arg3: tensor<bf16>): + %3 = stablehlo.maximum %arg2, %arg3 : tensor<bf16> + stablehlo.return %3 : tensor<bf16> + }) : (tensor<1x128x128x32xbf16>, tensor<bf16>) -> tensor<1x64x64x32xbf16> + return %2 : tensor<1x64x64x32xbf16> +} diff --git a/test/ttmlir/Silicon/StableHLO/mnist_inference.mlir b/test/ttmlir/Silicon/StableHLO/mnist_inference.mlir new file mode 100644 index 000000000..6958d7f92 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/mnist_inference.mlir @@ -0,0 +1,41 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn +// TODO: Enable when all ops are supported.
+// UNSUPPORTED: true +module @jit_predict attributes {mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} { + func.func public @main(%arg0: tensor<512x784xf32> {mhlo.layout_mode = "default"}, %arg1: tensor<512xf32> {mhlo.layout_mode = "default"}, %arg2: tensor<10x512xf32> {mhlo.layout_mode = "default"}, %arg3: tensor<10xf32> {mhlo.layout_mode = "default"}, %arg4: tensor<128x784xui8> {mhlo.layout_mode = "default"}) -> (tensor<128x10xf32> {jax.result_info = "", mhlo.layout_mode = "default"}) { + %0 = stablehlo.convert %arg0 : tensor<512x784xf32> + %1 = stablehlo.convert %arg4 : (tensor<128x784xui8>) -> tensor<128x784xf32> + %2 = stablehlo.dot_general %0, %1, contracting_dims = [1] x [1], precision = [DEFAULT, DEFAULT] : (tensor<512x784xf32>, tensor<128x784xf32>) -> tensor<512x128xf32> + %3 = stablehlo.transpose %2, dims = [1, 0] : (tensor<512x128xf32>) -> tensor<128x512xf32> + %4 = stablehlo.broadcast_in_dim %arg1, dims = [1] : (tensor<512xf32>) -> tensor<1x512xf32> + %5 = stablehlo.broadcast_in_dim %4, dims = [0, 1] : (tensor<1x512xf32>) -> tensor<128x512xf32> + %6 = stablehlo.add %3, %5 : tensor<128x512xf32> + %cst = stablehlo.constant dense<0.000000e+00> : tensor<f32> + %7 = stablehlo.broadcast_in_dim %cst, dims = [] : (tensor<f32>) -> tensor<128x512xf32> + %8 = stablehlo.maximum %7, %6 : tensor<128x512xf32> + %9 = stablehlo.dot_general %arg2, %8, contracting_dims = [1] x [1], precision = [DEFAULT, DEFAULT] : (tensor<10x512xf32>, tensor<128x512xf32>) -> tensor<10x128xf32> + %10 = stablehlo.transpose %9, dims = [1, 0] : (tensor<10x128xf32>) -> tensor<128x10xf32> + %11 = stablehlo.broadcast_in_dim %arg3, dims = [1] : (tensor<10xf32>) -> tensor<1x10xf32> + %12 = stablehlo.broadcast_in_dim %11, dims = [0, 1] : (tensor<1x10xf32>) -> tensor<128x10xf32> + %13 = stablehlo.add %10, %12 : tensor<128x10xf32> + %cst_0 = stablehlo.constant dense<0xFF800000> : tensor<f32> + %14 = stablehlo.reduce(%13 init: %cst_0) applies stablehlo.maximum across dimensions = [1] :
(tensor<128x10xf32>, tensor<f32>) -> tensor<128xf32> + %cst_1 = stablehlo.constant dense<0xFF800000> : tensor<f32> + %15 = stablehlo.broadcast_in_dim %cst_1, dims = [] : (tensor<f32>) -> tensor<128xf32> + %16 = stablehlo.maximum %15, %14 : tensor<128xf32> + %17 = stablehlo.broadcast_in_dim %16, dims = [0] : (tensor<128xf32>) -> tensor<128x1xf32> + %18 = stablehlo.broadcast_in_dim %17, dims = [0, 1] : (tensor<128x1xf32>) -> tensor<128x10xf32> + %19 = stablehlo.subtract %13, %18 : tensor<128x10xf32> + %20 = stablehlo.exponential %19 : tensor<128x10xf32> + %cst_2 = stablehlo.constant dense<0.000000e+00> : tensor<f32> + %21 = stablehlo.reduce(%20 init: %cst_2) applies stablehlo.add across dimensions = [1] : (tensor<128x10xf32>, tensor<f32>) -> tensor<128xf32> + %22 = stablehlo.broadcast_in_dim %21, dims = [0] : (tensor<128xf32>) -> tensor<128x1xf32> + %23 = stablehlo.broadcast_in_dim %22, dims = [0, 1] : (tensor<128x1xf32>) -> tensor<128x10xf32> + %24 = stablehlo.divide %20, %23 : tensor<128x10xf32> + return %24 : tensor<128x10xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/reduce_add_op.mlir b/test/ttmlir/Silicon/StableHLO/reduce_add_op.mlir new file mode 100644 index 000000000..1fcad52bd --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/reduce_add_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn +// UNSUPPORTED: true +module @jit_reduce_add attributes {} { + func.func public @test_reduce_add(%arg0: tensor<128x10xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> { + %0 = stablehlo.reduce(%arg0 init: %cst_0) applies stablehlo.add across dimensions = [1] : (tensor<128x10xf32>, tensor<f32>) -> tensor<128xf32> + return %0 : tensor<128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/reduce_maximum_op.mlir b/test/ttmlir/Silicon/StableHLO/reduce_maximum_op.mlir new file
mode 100644 index 000000000..d620c8c50 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/reduce_maximum_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn +// UNSUPPORTED: true +module @jit_reduce_maximum attributes {} { + func.func public @test_reduce_maximum(%arg0: tensor<128x10xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> { + %0 = stablehlo.reduce(%arg0 init: %cst_0) applies stablehlo.maximum across dimensions = [1] : (tensor<128x10xf32>, tensor<f32>) -> tensor<128xf32> + return %0 : tensor<128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/reshape_op.mlir b/test/ttmlir/Silicon/StableHLO/reshape_op.mlir new file mode 100644 index 000000000..830ba29b8 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/reshape_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_module_reshape attributes {mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} { + func.func public @main(%arg0: tensor<1x64x64x64xf32> {mhlo.layout_mode = "default", mhlo.sharding = "{replicated}"}) -> (tensor<1x1x4096x64xf32> {jax.result_info = "", mhlo.layout_mode = "default"}) { + %0 = stablehlo.reshape %arg0 : (tensor<1x64x64x64xf32>) -> tensor<1x1x4096x64xf32> + return %0 : tensor<1x1x4096x64xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/scalar_add_op.mlir b/test/ttmlir/Silicon/StableHLO/scalar_add_op.mlir new file mode 100644 index 000000000..26f697464 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/scalar_add_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt
--stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_scalar_add attributes {} { + func.func public @test_scalar_add(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> { + %0 = stablehlo.add %arg0, %arg1 : tensor<f32> + return %0 : tensor<f32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/sign_op.mlir b/test/ttmlir/Silicon/StableHLO/sign_op.mlir new file mode 100644 index 000000000..3f9ea54ec --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/sign_op.mlir @@ -0,0 +1,12 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_sign attributes {} { + func.func public @test_sign(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = stablehlo.sign %arg0 : tensor<64x128xf32> + return %0 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/StableHLO/slice_op.mlir b/test/ttmlir/Silicon/StableHLO/slice_op.mlir new file mode 100644 index 000000000..b62417dd1 --- /dev/null +++ b/test/ttmlir/Silicon/StableHLO/slice_op.mlir @@ -0,0 +1,16 @@ +// REQUIRES: stablehlo +// RUN: rm -rf %t.ttnn +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \ +// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn + +module @jit_eltwise_subtract attributes {} { + func.func @slice_op(%arg0: tensor<32x64xbf16>) -> tensor<8x8xbf16> { + %result = "stablehlo.slice"(%arg0) { + start_indices = array<i64: 0, 16>, + limit_indices = array<i64: 16, 32>, + strides = array<i64: 2, 2> + } : (tensor<32x64xbf16>) -> tensor<8x8xbf16> + return %result : tensor<8x8xbf16> + } +}