StableHLO runtime tests
mmanzoorTT committed Nov 5, 2024
1 parent 3dbf089 · commit ed1125f
Showing 45 changed files with 798 additions and 0 deletions.
15 changes: 15 additions & 0 deletions test/ttmlir/Silicon/StableHLO/binary/add_op.mlir
@@ -0,0 +1,15 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

module @jit_eltwise_add attributes {} {
func.func public @test_add(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = stablehlo.add %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}
}
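
All the tests in this commit share the same lit harness: ttmlir-opt lowers the StableHLO module to TTIR via --stablehlo-to-ttir-pipeline, lowers that to the TTNN backend via --ttir-to-ttnn-backend-pipeline (parameterized by the silicon system descriptor), and ttmlir-translate serializes the result to a flatbuffer. For readers less used to MLIR's custom assembly, the pretty-printed stablehlo.add above is shorthand for the generic form below; this is an illustrative sketch, not part of the diff:

// Generic form of the op in @test_add; equivalent to
// %0 = stablehlo.add %arg0, %arg1 : tensor<64x128xf32>
%0 = "stablehlo.add"(%arg0, %arg1) : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>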
37 changes: 37 additions & 0 deletions test/ttmlir/Silicon/StableHLO/binary/compare_op.mlir
@@ -0,0 +1,37 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

module @jit_eltwise_compare attributes {} {
func.func public @test_eq(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
%0 = stablehlo.compare EQ, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

func.func public @test_ne(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
%0 = stablehlo.compare NE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

func.func public @test_ge(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
%0 = stablehlo.compare GE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

func.func public @test_gt(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
%0 = stablehlo.compare GT, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

func.func public @test_le(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
%0 = stablehlo.compare LE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

func.func public @test_lt(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
%0 = stablehlo.compare LT, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}
}
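
The EQ/NE/GE/GT/LE/LT keywords are the pretty-printed form of the op's comparison_direction attribute. A sketch of the equivalent generic form for @test_eq (illustrative, not part of the diff):

%0 = "stablehlo.compare"(%arg0, %arg1) {
  comparison_direction = #stablehlo<comparison_direction EQ>
} : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>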
70 changes: 70 additions & 0 deletions test/ttmlir/Silicon/StableHLO/binary/concat_op.mlir
@@ -0,0 +1,70 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

module @jit_concat attributes {} {
func.func public @test_concat(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> {
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 1 : i64
} : (tensor<32x32xf32>, tensor<32x64xf32>) -> tensor<32x96xf32>
return %0 : tensor<32x96xf32>
}

func.func public @test_concat_2(%arg0: tensor<3x2xi64>, %arg1: tensor<1x2xi64>) -> tensor<4x2xi64> {
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 0 : i64
} : (tensor<3x2xi64>, tensor<1x2xi64>) -> tensor<4x2xi64>
return %0 : tensor<4x2xi64>
}

func.func public @test_concat_3(%arg0: tensor<4x3xf32>, %arg1: tensor<4x5xf32>) -> tensor<4x8xf32> {
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 1 : i64
} : (tensor<4x3xf32>, tensor<4x5xf32>) -> tensor<4x8xf32>
return %0 : tensor<4x8xf32>
}

func.func public @test_concat_4(%arg0: tensor<128x64xf32>, %arg1: tensor<128x96xf32>) -> tensor<128x160xf32> {
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 1 : i64
} : (tensor<128x64xf32>, tensor<128x96xf32>) -> tensor<128x160xf32>
return %0 : tensor<128x160xf32>
}

func.func public @test_concat_5(%arg0: tensor<256x512xi64>, %arg1: tensor<256x256xi64>) -> tensor<256x768xi64> {
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 1 : i64
} : (tensor<256x512xi64>, tensor<256x256xi64>) -> tensor<256x768xi64>
return %0 : tensor<256x768xi64>
}

func.func public @test_concat_6(%arg0: tensor<64x32xf64>, %arg1: tensor<64x64xf64>) -> tensor<64x96xf64> {
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 1 : i64
} : (tensor<64x32xf64>, tensor<64x64xf64>) -> tensor<64x96xf64>
return %0 : tensor<64x96xf64>
}

func.func public @test_concat_7(%arg0: tensor<1000x128xi32>, %arg1: tensor<500x128xi32>) -> tensor<1500x128xi32> {
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 0 : i64
} : (tensor<1000x128xi32>, tensor<500x128xi32>) -> tensor<1500x128xi32>
return %0 : tensor<1500x128xi32>
}

func.func public @test_concat_8(%arg0: tensor<3x2x4x5xf64>, %arg1: tensor<3x2x4x3xf64>) -> tensor<3x2x4x8xf64> {
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 3 : i64
} : (tensor<3x2x4x5xf64>, tensor<3x2x4x3xf64>) -> tensor<3x2x4x8xf64>
return %0 : tensor<3x2x4x8xf64>
}

func.func public @test_concat_9(%arg0: tensor<8x4x6xi32>, %arg1: tensor<8x4x2xi32>) -> tensor<8x4x8xi32> {
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 2 : i64
} : (tensor<8x4x6xi32>, tensor<8x4x2xi32>) -> tensor<8x4x8xi32>
return %0 : tensor<8x4x8xi32>
}
}
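
Each case follows the stablehlo.concatenate rule that operands must agree on every dimension except the concatenation dimension, whose result size is the sum of the operand sizes (32 + 64 = 96 in @test_concat, 1000 + 500 = 1500 in @test_concat_7, and so on). A minimal sketch of the shape arithmetic (illustrative, not part of the diff):

// Result dim 0 is 4 + 2 = 6; all remaining dims (none here) must match.
%0 = "stablehlo.concatenate"(%a, %b) {
  dimension = 0 : i64
} : (tensor<4xf32>, tensor<2xf32>) -> tensor<6xf32>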
13 changes: 13 additions & 0 deletions test/ttmlir/Silicon/StableHLO/binary/divide_op.mlir
@@ -0,0 +1,13 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module @jit_eltwise_divide attributes {} {
func.func public @test_divide(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = stablehlo.divide %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}
}
27 changes: 27 additions & 0 deletions test/ttmlir/Silicon/StableHLO/binary/logical_op.mlir
@@ -0,0 +1,27 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

module @jit_eltwise_logical attributes {} {
func.func public @logical_and(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> {
%0 = stablehlo.and %arg0, %arg1 : tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

func.func public @logical_or(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> {
%0 = stablehlo.or %arg0, %arg1 : tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

func.func public @logical_not(%arg0: tensor<64x128xi1>) -> tensor<64x128xi1> {
%0 = stablehlo.not %arg0 : tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

func.func public @logical_not_scalar(%arg0: tensor<i1>) -> tensor<i1> {
%0 = stablehlo.not %arg0 : tensor<i1>
return %0 : tensor<i1>
}
}
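
On i1 tensors, stablehlo.and, stablehlo.or, and stablehlo.not act as logical operations; the same ops applied to wider integer types operate bitwise per element. An illustrative sketch (not part of the diff):

// Bitwise AND when the element type is a multi-bit integer.
%0 = stablehlo.and %arg0, %arg1 : tensor<64x128xi32>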
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/binary/maximum_op.mlir
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

module @jit_eltwise_maximum attributes {} {
func.func public @test_maximum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = stablehlo.maximum %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}
}
12 changes: 12 additions & 0 deletions test/ttmlir/Silicon/StableHLO/binary/minimum_op.mlir
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

module @jit_eltwise_minimum attributes {} {
func.func public @test_minimum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = stablehlo.minimum %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}
}
15 changes: 15 additions & 0 deletions test/ttmlir/Silicon/StableHLO/binary/multiply_op.mlir
@@ -0,0 +1,15 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

module @jit_eltwise_multiply attributes {} {
func.func public @test_multiply(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
%0 = stablehlo.multiply %arg0, %arg1 : tensor<13x21x3xf32>
return %0 : tensor<13x21x3xf32>
}
}
13 changes: 13 additions & 0 deletions test/ttmlir/Silicon/StableHLO/binary/remainder_op.mlir
@@ -0,0 +1,13 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module @jit_eltwise_remainder attributes {} {
func.func public @test_remainder(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = stablehlo.remainder %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}
}
13 changes: 13 additions & 0 deletions test/ttmlir/Silicon/StableHLO/binary/subtract_op.mlir
@@ -0,0 +1,13 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module @jit_eltwise_subtract attributes {} {
func.func public @test_subtract(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = stablehlo.subtract %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}
}
13 changes: 13 additions & 0 deletions test/ttmlir/Silicon/StableHLO/broadcast_op.mlir
@@ -0,0 +1,13 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

module @jit_broadcast attributes {mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} {
func.func public @main(%arg0: tensor<1xf32> {mhlo.layout_mode = "default"}, %arg1: tensor<64x128xf32> {mhlo.layout_mode = "default"}) -> (tensor<64x128xf32> {jax.result_info = "", mhlo.layout_mode = "default"}) {
%0 = stablehlo.broadcast_in_dim %arg0, dims = [1] : (tensor<1xf32>) -> tensor<64x128xf32>
%1 = stablehlo.maximum %0, %arg1 : tensor<64x128xf32>
return %1 : tensor<64x128xf32>
}
}
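
In stablehlo.broadcast_in_dim, dims[i] names the result dimension that operand dimension i maps to, and operand dimensions of size 1 are replicated; here dims = [1] sends the size-1 operand to result dimension 1 and broadcasts it across all 64 rows before the maximum. A second illustrative sketch (not part of the diff):

// dims = [1]: the 3-element operand becomes every row of the 2x3 result.
%0 = stablehlo.broadcast_in_dim %arg0, dims = [1] : (tensor<3xf32>) -> tensor<2x3xf32>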
19 changes: 19 additions & 0 deletions test/ttmlir/Silicon/StableHLO/composite_op.mlir
@@ -0,0 +1,19 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

module @jit_eltwise_add attributes {} {
func.func private @add_impl(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = stablehlo.add %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}

func.func public @main(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%results = stablehlo.composite "jit_eltwise_add.my_add" %arg0, %arg1 {
decomposition = @add_impl
} : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
return %results : tensor<64x128xf32>
}
}
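
stablehlo.composite tags the call with a stable name ("jit_eltwise_add.my_add") that a backend can pattern-match as a unit, while decomposition supplies the reference implementation to inline when the backend has no special handling. Composites may also carry metadata through a composite_attributes dictionary; in the sketch below that dictionary's contents are invented for illustration and are not part of the diff:

%results = stablehlo.composite "jit_eltwise_add.my_add" %arg0, %arg1 {
  composite_attributes = { approximate = false },  // hypothetical metadata
  decomposition = @add_impl
} : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>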
22 changes: 22 additions & 0 deletions test/ttmlir/Silicon/StableHLO/constant/constant_bf16.mlir
@@ -0,0 +1,22 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

module @jit_constant attributes {} {
func.func public @test_bfloat16_scalar() -> tensor<bf16> {
%0 = stablehlo.constant dense<3.0> : tensor<bf16>
return %0 : tensor<bf16>
}

func.func public @test_bfloat16_empty() -> tensor<64x128xbf16> {
%0 = stablehlo.constant dense<0.0> : tensor<64x128xbf16>
return %0 : tensor<64x128xbf16>
}

func.func public @test_bfloat16_splat() -> tensor<64x128xbf16> {
%0 = stablehlo.constant dense<3.0> : tensor<64x128xbf16>
return %0 : tensor<64x128xbf16>
}
}
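
These constants all use splat dense attributes, where a single value fills the entire tensor; dense also accepts explicit element lists. An illustrative sketch of both forms (not part of the diff):

// Splat vs. explicit elements; both yield tensor<3xbf16> constants.
%0 = stablehlo.constant dense<3.0> : tensor<3xbf16>
%1 = stablehlo.constant dense<[1.0, 2.0, 3.0]> : tensor<3xbf16>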
17 changes: 17 additions & 0 deletions test/ttmlir/Silicon/StableHLO/constant/constant_bool.mlir
@@ -0,0 +1,17 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

module @jit_constant attributes {} {
func.func public @test_boolean_scalar() -> tensor<i1> {
%0 = stablehlo.constant dense<true> : tensor<i1>
return %0 : tensor<i1>
}

func.func public @test_boolean_splat() -> tensor<64xi1> {
%0 = stablehlo.constant dense<true> : tensor<64xi1>
return %0 : tensor<64xi1>
}
}
22 changes: 22 additions & 0 deletions test/ttmlir/Silicon/StableHLO/constant/constant_f16.mlir
@@ -0,0 +1,22 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

module @jit_constant attributes {} {
func.func public @test_float16_scalar() -> tensor<f16> {
%0 = stablehlo.constant dense<3.0> : tensor<f16>
return %0 : tensor<f16>
}

func.func public @test_float16_empty() -> tensor<64xf16> {
%0 = stablehlo.constant dense<0.0> : tensor<64xf16>
return %0 : tensor<64xf16>
}

func.func public @test_float16_splat() -> tensor<64xf16> {
%0 = stablehlo.constant dense<3.0> : tensor<64xf16>
return %0 : tensor<64xf16>
}
}
22 changes: 22 additions & 0 deletions test/ttmlir/Silicon/StableHLO/constant/constant_f32.mlir
@@ -0,0 +1,22 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

module @jit_constant attributes {} {
func.func public @test_float_scalar() -> tensor<f32> {
%0 = stablehlo.constant dense<0.3> : tensor<f32>
return %0 : tensor<f32>
}

func.func public @test_float_empty() -> tensor<64xf32> {
%0 = stablehlo.constant dense<0.0> : tensor<64xf32>
return %0 : tensor<64xf32>
}

func.func public @test_float_splat() -> tensor<64xf32> {
%0 = stablehlo.constant dense<0.3> : tensor<64xf32>
return %0 : tensor<64xf32>
}
}
22 changes: 22 additions & 0 deletions test/ttmlir/Silicon/StableHLO/constant/constant_i16.mlir
@@ -0,0 +1,22 @@
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn

module @jit_constant attributes {} {
func.func public @test_int16_scalar() -> tensor<i16> {
%0 = stablehlo.constant dense<3> : tensor<i16>
return %0 : tensor<i16>
}

func.func public @test_int16_empty() -> tensor<64x128xi16> {
%0 = stablehlo.constant dense<0> : tensor<64x128xi16>
return %0 : tensor<64x128xi16>
}

func.func public @test_int16_splat() -> tensor<64x128xi16> {
%0 = stablehlo.constant dense<3> : tensor<64x128xi16>
return %0 : tensor<64x128xi16>
}
}
(Diff truncated; the remaining 28 changed files are not shown.)
