Skip to content

Commit

Permalink
Stable HLO runtime tests
Browse files Browse the repository at this point in the history
  • Loading branch information
mmanzoorTT committed Nov 12, 2024
1 parent 14cd5d0 commit 9372e07
Show file tree
Hide file tree
Showing 45 changed files with 721 additions and 0 deletions.
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/add_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: elementwise add must lower from StableHLO through the
// TTIR and TTNN pipelines and serialize to a flatbuffer without error.
module @jit_eltwise_add attributes {} {
  func.func public @test_add(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
    // Generic (quoted) form; parses to the same op as the `stablehlo.add` pretty form.
    %sum = "stablehlo.add"(%arg0, %arg1) : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
    return %sum : tensor<64x128xf32>
  }
}
37 changes: 37 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/compare_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: one function per stablehlo.compare direction
// (EQ, NE, GE, GT, LE, LT). Each must lower through the TTIR and TTNN
// pipelines and serialize to a flatbuffer without error.
module @jit_eltwise_compare attributes {} {
  // Equal.
  func.func public @test_eq(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
    %eq = stablehlo.compare EQ, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
    return %eq : tensor<64x128xi1>
  }

  // Not equal.
  func.func public @test_ne(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
    %ne = stablehlo.compare NE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
    return %ne : tensor<64x128xi1>
  }

  // Greater than or equal.
  func.func public @test_ge(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
    %ge = stablehlo.compare GE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
    return %ge : tensor<64x128xi1>
  }

  // Greater than.
  func.func public @test_gt(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
    %gt = stablehlo.compare GT, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
    return %gt : tensor<64x128xi1>
  }

  // Less than or equal.
  func.func public @test_le(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
    %le = stablehlo.compare LE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
    return %le : tensor<64x128xi1>
  }

  // Less than.
  func.func public @test_lt(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
    %lt = stablehlo.compare LT, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
    return %lt : tensor<64x128xi1>
  }
}
43 changes: 43 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/concat_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: stablehlo.concatenate over several shapes and
// dimensions must lower through the TTIR and TTNN pipelines and serialize
// to a flatbuffer without error.
module @jit_concat attributes {} {
  // Concatenate along the leading dimension: 32 + 64 rows -> 96 rows.
  func.func public @test_concat_0(%arg0: tensor<32x32xf32>, %arg1: tensor<64x32xf32>) -> tensor<96x32xf32> {
    %cat = "stablehlo.concatenate"(%arg0, %arg1) {dimension = 0 : i64} : (tensor<32x32xf32>, tensor<64x32xf32>) -> tensor<96x32xf32>
    return %cat : tensor<96x32xf32>
  }

  // Concatenate along the trailing dimension: 32 + 64 cols -> 96 cols.
  func.func public @test_concat_1(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> {
    %cat = "stablehlo.concatenate"(%arg0, %arg1) {dimension = 1 : i64} : (tensor<32x32xf32>, tensor<32x64xf32>) -> tensor<32x96xf32>
    return %cat : tensor<32x96xf32>
  }

  // Dim-1 concat with larger operands: 64 + 96 cols -> 160 cols.
  func.func public @test_concat_2(%arg0: tensor<128x64xf32>, %arg1: tensor<128x96xf32>) -> tensor<128x160xf32> {
    %cat = "stablehlo.concatenate"(%arg0, %arg1) {dimension = 1 : i64} : (tensor<128x64xf32>, tensor<128x96xf32>) -> tensor<128x160xf32>
    return %cat : tensor<128x160xf32>
  }

  // Dim-1 concat with mismatched column counts: 32 + 64 cols -> 96 cols.
  func.func public @test_concat_3(%arg0: tensor<64x32xf32>, %arg1: tensor<64x64xf32>) -> tensor<64x96xf32> {
    %cat = "stablehlo.concatenate"(%arg0, %arg1) {dimension = 1 : i64} : (tensor<64x32xf32>, tensor<64x64xf32>) -> tensor<64x96xf32>
    return %cat : tensor<64x96xf32>
  }

  // Rank-4 operands, concatenated along the last dimension.
  func.func public @test_concat_4(%arg0: tensor<32x32x32x32xf32>, %arg1: tensor<32x32x32x64xf32>) -> tensor<32x32x32x96xf32> {
    %cat = "stablehlo.concatenate"(%arg0, %arg1) {dimension = 3 : i64} : (tensor<32x32x32x32xf32>, tensor<32x32x32x64xf32>) -> tensor<32x32x32x96xf32>
    return %cat : tensor<32x32x32x96xf32>
  }
}
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/divide_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: elementwise divide must lower from StableHLO through
// the TTIR and TTNN pipelines and serialize to a flatbuffer without error.
// Fixed: module was misnamed @jit_eltwise_divice (typo for "divide").
module @jit_eltwise_divide attributes {} {
  func.func public @test_divide(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
    %0 = stablehlo.divide %arg0, %arg1 : tensor<64x128xf32>
    return %0 : tensor<64x128xf32>
  }
}
17 changes: 17 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/logical_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: elementwise logical and/or on i1 tensors must lower
// from StableHLO through the TTIR and TTNN pipelines and serialize to a
// flatbuffer without error.
// Fixed: module was misnamed @jit_eltwise_compare (copy-paste from the
// compare test); renamed to match this file's contents.
module @jit_eltwise_logical attributes {} {
  func.func public @logical_and(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> {
    %0 = stablehlo.and %arg0, %arg1 : tensor<64x128xi1>
    return %0 : tensor<64x128xi1>
  }

  func.func public @logical_or(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> {
    %0 = stablehlo.or %arg0, %arg1 : tensor<64x128xi1>
    return %0 : tensor<64x128xi1>
  }
}
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/maximum_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: elementwise maximum must lower from StableHLO through
// the TTIR and TTNN pipelines and serialize to a flatbuffer without error.
module @jit_eltwise_maximum attributes {} {
  func.func public @test_maximum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
    // Generic (quoted) form; parses to the same op as the `stablehlo.maximum` pretty form.
    %max = "stablehlo.maximum"(%arg0, %arg1) : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
    return %max : tensor<64x128xf32>
  }
}
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/minimum_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: elementwise minimum must lower from StableHLO through
// the TTIR and TTNN pipelines and serialize to a flatbuffer without error.
module @jit_eltwise_minimum attributes {} {
  func.func public @test_minimum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
    // Generic (quoted) form; parses to the same op as the `stablehlo.minimum` pretty form.
    %min = "stablehlo.minimum"(%arg0, %arg1) : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
    return %min : tensor<64x128xf32>
  }
}
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/multiply_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: elementwise multiply (rank-3, non-tile-aligned shape)
// must lower from StableHLO through the TTIR and TTNN pipelines and
// serialize to a flatbuffer without error.
module @jit_eltwise_multiply attributes {} {
  func.func public @test_multiply(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
    // Generic (quoted) form; parses to the same op as the `stablehlo.multiply` pretty form.
    %prod = "stablehlo.multiply"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32>
    return %prod : tensor<13x21x3xf32>
  }
}
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/remainder_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: elementwise remainder must lower from StableHLO
// through the TTIR and TTNN pipelines and serialize to a flatbuffer
// without error.
module @jit_eltwise_remainder attributes {} {
  func.func public @test_remainder(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
    // Generic (quoted) form; parses to the same op as the `stablehlo.remainder` pretty form.
    %rem = "stablehlo.remainder"(%arg0, %arg1) : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
    return %rem : tensor<64x128xf32>
  }
}
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/subtract_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: elementwise subtract must lower from StableHLO
// through the TTIR and TTNN pipelines and serialize to a flatbuffer
// without error.
module @jit_eltwise_subtract attributes {} {
  func.func public @test_subtract(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
    // Generic (quoted) form; parses to the same op as the `stablehlo.subtract` pretty form.
    %diff = "stablehlo.subtract"(%arg0, %arg1) : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
    return %diff : tensor<64x128xf32>
  }
}
22 changes: 22 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Constant/constant_bf16.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: bf16 constants (scalar, all-zero splat, non-zero
// splat) must lower through the TTIR and TTNN pipelines and serialize to a
// flatbuffer without error.
module @jit_constant attributes {} {
  // 0-d (scalar) bf16 constant.
  func.func public @test_bfloat16_scalar() -> tensor<bf16> {
    %cst = stablehlo.constant dense<3.0> : tensor<bf16>
    return %cst : tensor<bf16>
  }

  // All-zero splat ("empty" here means zero-valued, not zero-sized).
  func.func public @test_bfloat16_empty() -> tensor<64x128xbf16> {
    %zeros = stablehlo.constant dense<0.0> : tensor<64x128xbf16>
    return %zeros : tensor<64x128xbf16>
  }

  // Non-zero splat constant.
  func.func public @test_bfloat16_splat() -> tensor<64x128xbf16> {
    %splat = stablehlo.constant dense<3.0> : tensor<64x128xbf16>
    return %splat : tensor<64x128xbf16>
  }
}
17 changes: 17 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Constant/constant_bool.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: boolean (i1) constants must lower through the TTIR
// and TTNN pipelines and serialize to a flatbuffer without error.
module @jit_constant attributes {} {
  // 0-d (scalar) boolean constant.
  func.func public @test_boolean_scalar() -> tensor<i1> {
    %cst = stablehlo.constant dense<true> : tensor<i1>
    return %cst : tensor<i1>
  }

  // 1-d splat of `true`.
  func.func public @test_boolean_splat() -> tensor<64xi1> {
    %splat = stablehlo.constant dense<true> : tensor<64xi1>
    return %splat : tensor<64xi1>
  }
}
22 changes: 22 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Constant/constant_f32.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: f32 constants (scalar, all-zero splat, non-zero
// splat) must lower through the TTIR and TTNN pipelines and serialize to a
// flatbuffer without error.
module @jit_constant attributes {} {
  // 0-d (scalar) f32 constant.
  func.func public @test_float_scalar() -> tensor<f32> {
    %cst = stablehlo.constant dense<0.3> : tensor<f32>
    return %cst : tensor<f32>
  }

  // All-zero splat ("empty" here means zero-valued, not zero-sized).
  func.func public @test_float_empty() -> tensor<64xf32> {
    %zeros = stablehlo.constant dense<0.0> : tensor<64xf32>
    return %zeros : tensor<64xf32>
  }

  // Non-zero splat constant.
  func.func public @test_float_splat() -> tensor<64xf32> {
    %splat = stablehlo.constant dense<0.3> : tensor<64xf32>
    return %splat : tensor<64xf32>
  }
}
22 changes: 22 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Constant/constant_i16.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: i16 constants (scalar, all-zero splat, non-zero
// splat) must lower through the TTIR and TTNN pipelines and serialize to a
// flatbuffer without error.
// Fixed: functions were named test_int32_* (copy-paste from the i32 test)
// although this file exercises i16; renamed to test_int16_*.
module @jit_constant attributes {} {
  func.func public @test_int16_scalar() -> tensor<i16> {
    %0 = stablehlo.constant dense<3> : tensor<i16>
    return %0 : tensor<i16>
  }

  // All-zero splat ("empty" here means zero-valued, not zero-sized).
  func.func public @test_int16_empty() -> tensor<64x128xi16> {
    %0 = stablehlo.constant dense<0> : tensor<64x128xi16>
    return %0 : tensor<64x128xi16>
  }

  func.func public @test_int16_splat() -> tensor<64x128xi16> {
    %0 = stablehlo.constant dense<3> : tensor<64x128xi16>
    return %0 : tensor<64x128xi16>
  }
}
22 changes: 22 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Constant/constant_i32.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: i32 constants (scalar, all-zero splat, non-zero
// splat) must lower through the TTIR and TTNN pipelines and serialize to a
// flatbuffer without error.
module @jit_constant attributes {} {
  // 0-d (scalar) i32 constant.
  func.func public @test_int32_scalar() -> tensor<i32> {
    %cst = stablehlo.constant dense<3> : tensor<i32>
    return %cst : tensor<i32>
  }

  // All-zero splat ("empty" here means zero-valued, not zero-sized).
  func.func public @test_int32_empty() -> tensor<64x128xi32> {
    %zeros = stablehlo.constant dense<0> : tensor<64x128xi32>
    return %zeros : tensor<64x128xi32>
  }

  // Non-zero splat constant.
  func.func public @test_int32_splat() -> tensor<64x128xi32> {
    %splat = stablehlo.constant dense<3> : tensor<64x128xi32>
    return %splat : tensor<64x128xi32>
  }
}
22 changes: 22 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Constant/constant_i64.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: i64 constants (scalar, all-zero splat, non-zero
// splat) must lower through the TTIR and TTNN pipelines and serialize to a
// flatbuffer without error.
module @jit_constant attributes {} {
  // 0-d (scalar) i64 constant.
  func.func public @test_int64_scalar() -> tensor<i64> {
    %cst = stablehlo.constant dense<3> : tensor<i64>
    return %cst : tensor<i64>
  }

  // All-zero splat ("empty" here means zero-valued, not zero-sized).
  func.func public @test_int64_empty() -> tensor<64x128xi64> {
    %zeros = stablehlo.constant dense<0> : tensor<64x128xi64>
    return %zeros : tensor<64x128xi64>
  }

  // Non-zero splat constant.
  func.func public @test_int64_splat() -> tensor<64x128xi64> {
    %splat = stablehlo.constant dense<3> : tensor<64x128xi64>
    return %splat : tensor<64x128xi64>
  }
}
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Unary/absolute_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: elementwise absolute value must lower from StableHLO
// through the TTIR and TTNN pipelines and serialize to a flatbuffer
// without error.
module @jit_eltwise_abs attributes {} {
  func.func public @test_abs(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
    // Generic (quoted) form; parses to the same op as the `stablehlo.abs` pretty form.
    %abs = "stablehlo.abs"(%arg0) : (tensor<64x128xf32>) -> tensor<64x128xf32>
    return %abs : tensor<64x128xf32>
  }
}
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Unary/cbrt_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: elementwise cube root must lower from StableHLO
// through the TTIR and TTNN pipelines and serialize to a flatbuffer
// without error.
// Fixed: module was misnamed @jit_eltwise_rsqrt (copy-paste from the rsqrt
// test); renamed to match this file's contents.
module @jit_eltwise_cbrt attributes {} {
  func.func public @test_cbrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
    %0 = stablehlo.cbrt %arg0 : tensor<64x128xf32>
    return %0 : tensor<64x128xf32>
  }
}
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Unary/ceil_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: elementwise ceil must lower from StableHLO through
// the TTIR and TTNN pipelines and serialize to a flatbuffer without error.
module @jit_eltwise_ceil attributes {} {
  func.func public @test_ceil(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
    // Generic (quoted) form; parses to the same op as the `stablehlo.ceil` pretty form.
    %ceil = "stablehlo.ceil"(%arg0) : (tensor<64x128xf32>) -> tensor<64x128xf32>
    return %ceil : tensor<64x128xf32>
  }
}
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Unary/cosine_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: elementwise cosine must lower from StableHLO through
// the TTIR and TTNN pipelines and serialize to a flatbuffer without error.
module @jit_eltwise_cosine attributes {} {
  func.func public @test_cosine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
    // Generic (quoted) form; parses to the same op as the `stablehlo.cosine` pretty form.
    %cos = "stablehlo.cosine"(%arg0) : (tensor<64x128xf32>) -> tensor<64x128xf32>
    return %cos : tensor<64x128xf32>
  }
}
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Unary/exponential_minus_one_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

// Silicon smoke test: elementwise exp(x) - 1 must lower from StableHLO
// through the TTIR and TTNN pipelines and serialize to a flatbuffer
// without error.
module @jit_eltwise_expm1 attributes {} {
  func.func public @test_expm1(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
    // Generic (quoted) form; parses to the same op as the
    // `stablehlo.exponential_minus_one` pretty form.
    %expm1 = "stablehlo.exponential_minus_one"(%arg0) : (tensor<64x128xf32>) -> tensor<64x128xf32>
    return %expm1 : tensor<64x128xf32>
  }
}
Loading

0 comments on commit 9372e07

Please sign in to comment.