Skip to content

Commit

Permalink
Stable HLO runtime tests
Browse files Browse the repository at this point in the history
  • Loading branch information
mmanzoorTT committed Nov 15, 2024
1 parent 5fd2630 commit 138ae23
Show file tree
Hide file tree
Showing 47 changed files with 1,314 additions and 0 deletions.
21 changes: 21 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/add_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
// Silicon runtime test for elementwise add: the StableHLO input is lowered
// through the StableHLO->TTIR and TTIR->TTNN pipelines, translated to a TTNN
// flatbuffer, and the lowered TTNN IR is pattern-verified with FileCheck.
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: rm -rf %t.mlir
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
// RUN: FileCheck --input-file=%t.mlir %s

module @jit_eltwise_add attributes {} {
// stablehlo.add on two 64x128 f32 tensors must lower to a ttnn.add writing
// into a ttnn.empty-allocated output of the same shape.
func.func public @test_add(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK-LABEL: func.func public @test_add
// CHECK: ttnn.empty
// CHECK: ttnn.add
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: -> tensor<64x128xf32,
%0 = stablehlo.add %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}
}
81 changes: 81 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/compare_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
// Silicon runtime test for elementwise comparison ops (EQ/NE/GE/GT/LE/LT):
// each StableHLO compare is lowered through the TTIR and TTNN pipelines,
// serialized to a flatbuffer, and the lowered IR is checked with FileCheck.
// Note: the StableHLO i1 results appear as bf16 tensors in the lowered TTNN
// IR, which is what the result-type patterns below assert.
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: rm -rf %t.mlir
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
// RUN: FileCheck --input-file=%t.mlir %s

module @jit_eltwise_compare attributes {} {
// compare EQ lowers to ttnn.eq.
func.func public @test_eq(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
// CHECK-LABEL: func.func public @test_eq
// CHECK: ttnn.empty
// CHECK: ttnn.eq
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: -> tensor<64x128xbf16,
%0 = stablehlo.compare EQ, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

// compare NE lowers to ttnn.ne.
func.func public @test_ne(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
// CHECK-LABEL: func.func public @test_ne
// CHECK: ttnn.empty
// CHECK: ttnn.ne
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: -> tensor<64x128xbf16,
%0 = stablehlo.compare NE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

// compare GE lowers to ttnn.ge.
func.func public @test_ge(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
// CHECK-LABEL: func.func public @test_ge
// CHECK: ttnn.empty
// CHECK: ttnn.ge
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: -> tensor<64x128xbf16,
%0 = stablehlo.compare GE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

// compare GT lowers to ttnn.gt.
func.func public @test_gt(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
// CHECK-LABEL: func.func public @test_gt
// CHECK: ttnn.empty
// CHECK: ttnn.gt
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: -> tensor<64x128xbf16,
%0 = stablehlo.compare GT, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

// compare LE lowers to ttnn.le.
func.func public @test_le(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
// CHECK-LABEL: func.func public @test_le
// CHECK: ttnn.empty
// CHECK: ttnn.le
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: -> tensor<64x128xbf16,
%0 = stablehlo.compare LE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

// compare LT lowers to ttnn.lt.
func.func public @test_lt(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
// CHECK-LABEL: func.func public @test_lt
// CHECK: ttnn.empty
// CHECK: ttnn.lt
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: -> tensor<64x128xbf16,
%0 = stablehlo.compare LT, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}
}
85 changes: 85 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/concat_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
// Silicon runtime test for stablehlo.concatenate: covers concatenation along
// dim 0 and dim 1 of 2-D tensors and along the last dim of a 4-D tensor, with
// several operand sizes. Each case must lower to a ttnn.concat with the same
// dim attribute, and the lowered TTNN IR is verified with FileCheck.
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: rm -rf %t.mlir
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
// RUN: FileCheck --input-file=%t.mlir %s

module @jit_concat attributes {} {
// Concatenate along dim 0: 32x32 + 64x32 -> 96x32.
func.func public @test_concat_0(%arg0: tensor<32x32xf32>, %arg1: tensor<64x32xf32>) -> tensor<96x32xf32> {
// CHECK-LABEL: func.func public @test_concat_0
// CHECK: ttnn.empty
// CHECK: ttnn.concat
// CHECK-SAME: dim = 0
// CHECK-SAME: tensor<32x32xf32,
// CHECK-SAME: tensor<64x32xf32,
// CHECK-SAME: tensor<96x32xf32,
// CHECK-SAME: -> tensor<96x32xf32,
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 0 : i64
} : (tensor<32x32xf32>, tensor<64x32xf32>) -> tensor<96x32xf32>
return %0 : tensor<96x32xf32>
}

// Concatenate along dim 1: 32x32 + 32x64 -> 32x96.
func.func public @test_concat_1(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> {
// CHECK-LABEL: func.func public @test_concat_1
// CHECK: ttnn.empty
// CHECK: ttnn.concat
// CHECK-SAME: dim = 1
// CHECK-SAME: tensor<32x32xf32,
// CHECK-SAME: tensor<32x64xf32,
// CHECK-SAME: tensor<32x96xf32,
// CHECK-SAME: -> tensor<32x96xf32,
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 1 : i64
} : (tensor<32x32xf32>, tensor<32x64xf32>) -> tensor<32x96xf32>
return %0 : tensor<32x96xf32>
}


// Concatenate along dim 1 with a larger leading dim: 128x64 + 128x96 -> 128x160.
func.func public @test_concat_2(%arg0: tensor<128x64xf32>, %arg1: tensor<128x96xf32>) -> tensor<128x160xf32> {
// CHECK-LABEL: func.func public @test_concat_2
// CHECK: ttnn.empty
// CHECK: ttnn.concat
// CHECK-SAME: dim = 1
// CHECK-SAME: tensor<128x64xf32,
// CHECK-SAME: tensor<128x96xf32,
// CHECK-SAME: tensor<128x160xf32,
// CHECK-SAME: -> tensor<128x160xf32,
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 1 : i64
} : (tensor<128x64xf32>, tensor<128x96xf32>) -> tensor<128x160xf32>
return %0 : tensor<128x160xf32>
}

// Concatenate along dim 1 with unequal widths: 64x32 + 64x64 -> 64x96.
func.func public @test_concat_3(%arg0: tensor<64x32xf32>, %arg1: tensor<64x64xf32>) -> tensor<64x96xf32> {
// CHECK-LABEL: func.func public @test_concat_3
// CHECK: ttnn.empty
// CHECK: ttnn.concat
// CHECK-SAME: dim = 1
// CHECK-SAME: tensor<64x32xf32,
// CHECK-SAME: tensor<64x64xf32,
// CHECK-SAME: tensor<64x96xf32,
// CHECK-SAME: -> tensor<64x96xf32,
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 1 : i64
} : (tensor<64x32xf32>, tensor<64x64xf32>) -> tensor<64x96xf32>
return %0 : tensor<64x96xf32>
}

// Concatenate along the last dim of a 4-D tensor (dim 3).
func.func public @test_concat_4(%arg0: tensor<32x32x32x32xf32>, %arg1: tensor<32x32x32x64xf32>) -> tensor<32x32x32x96xf32> {
// CHECK-LABEL: func.func public @test_concat_4
// CHECK: ttnn.empty
// CHECK: ttnn.concat
// CHECK-SAME: dim = 3
// CHECK-SAME: tensor<32x32x32x32xf32,
// CHECK-SAME: tensor<32x32x32x64xf32,
// CHECK-SAME: tensor<32x32x32x96xf32,
// CHECK-SAME: -> tensor<32x32x32x96xf32,
%0 = "stablehlo.concatenate"(%arg0, %arg1) {
dimension = 3 : i64
} : (tensor<32x32x32x32xf32>, tensor<32x32x32x64xf32>) -> tensor<32x32x32x96xf32>
return %0 : tensor<32x32x32x96xf32>
}
}
21 changes: 21 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/divide_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
// Silicon runtime test for elementwise divide: the StableHLO input is lowered
// through the StableHLO->TTIR and TTIR->TTNN pipelines, translated to a TTNN
// flatbuffer, and the lowered TTNN IR is pattern-verified with FileCheck.
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: rm -rf %t.mlir
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
// RUN: FileCheck --input-file=%t.mlir %s

// Module renamed from @jit_eltwise_divice (typo) to @jit_eltwise_divide; the
// module symbol is not referenced by any FileCheck directive, so the rename
// does not affect the test's matching behavior.
module @jit_eltwise_divide attributes {} {
// stablehlo.divide on 64x128 f32 tensors must lower to a ttnn.div writing
// into a ttnn.empty-allocated output of the same shape.
func.func public @test_divide(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK-LABEL: func.func public @test_divide
// CHECK: ttnn.empty
// CHECK: ttnn.div
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: -> tensor<64x128xf32,
%0 = stablehlo.divide %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}
}
45 changes: 45 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/logical_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
// Silicon runtime test for elementwise logical ops (and/or/xor): each
// StableHLO op is lowered through the TTIR and TTNN pipelines, serialized to
// a flatbuffer, and the lowered IR is verified with FileCheck.
// Note: the StableHLO i1 operands/results appear as bf16 tensors in the
// lowered TTNN IR, which is what the type patterns below assert.
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: rm -rf %t.mlir
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
// RUN: FileCheck --input-file=%t.mlir %s

// Module renamed from @jit_eltwise_compare (copy-paste leftover from the
// compare test) to @jit_eltwise_logical to match this file's contents; the
// module symbol is not referenced by any FileCheck directive.
module @jit_eltwise_logical attributes {} {
// stablehlo.and lowers to ttnn.logical_and.
func.func public @logical_and(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> {
// CHECK-LABEL: func.func public @logical_and
// CHECK: ttnn.empty
// CHECK: ttnn.logical_and
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: -> tensor<64x128xbf16,
%0 = stablehlo.and %arg0, %arg1 : tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

// stablehlo.or lowers to ttnn.logical_or.
func.func public @logical_or(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> {
// CHECK-LABEL: func.func public @logical_or
// CHECK: ttnn.empty
// CHECK: ttnn.logical_or
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: -> tensor<64x128xbf16,
%0 = stablehlo.or %arg0, %arg1 : tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}

// stablehlo.xor lowers to ttnn.logical_xor.
func.func public @logical_xor(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> {
// CHECK-LABEL: func.func public @logical_xor
// CHECK: ttnn.empty
// CHECK: ttnn.logical_xor
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: tensor<64x128xbf16,
// CHECK-SAME: -> tensor<64x128xbf16,
%0 = stablehlo.xor %arg0, %arg1 : tensor<64x128xi1>
return %0 : tensor<64x128xi1>
}
}
21 changes: 21 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/maximum_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
// Silicon runtime test for elementwise maximum: lowered through the TTIR and
// TTNN pipelines, serialized to a flatbuffer, and verified with FileCheck.
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: rm -rf %t.mlir
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
// RUN: FileCheck --input-file=%t.mlir %s

module @jit_eltwise_maximum attributes {} {
// stablehlo.maximum on 64x128 f32 tensors must lower to a ttnn.maximum
// writing into a ttnn.empty-allocated output of the same shape.
func.func public @test_maximum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK-LABEL: func.func public @test_maximum
// CHECK: ttnn.empty
// CHECK: ttnn.maximum
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: -> tensor<64x128xf32,
%0 = stablehlo.maximum %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}
}
21 changes: 21 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/minimum_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
// Silicon runtime test for elementwise minimum: lowered through the TTIR and
// TTNN pipelines, serialized to a flatbuffer, and verified with FileCheck.
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: rm -rf %t.mlir
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
// RUN: FileCheck --input-file=%t.mlir %s

module @jit_eltwise_minimum attributes {} {
// stablehlo.minimum on 64x128 f32 tensors must lower to a ttnn.minimum
// writing into a ttnn.empty-allocated output of the same shape.
func.func public @test_minimum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK-LABEL: func.func public @test_minimum
// CHECK: ttnn.empty
// CHECK: ttnn.minimum
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: -> tensor<64x128xf32,
%0 = stablehlo.minimum %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}
}
21 changes: 21 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/multiply_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
// Silicon runtime test for elementwise multiply: lowered through the TTIR and
// TTNN pipelines, serialized to a flatbuffer, and verified with FileCheck.
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: rm -rf %t.mlir
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
// RUN: FileCheck --input-file=%t.mlir %s

module @jit_eltwise_multiply attributes {} {
// stablehlo.multiply on 64x128 f32 tensors must lower to a ttnn.multiply
// writing into a ttnn.empty-allocated output of the same shape.
func.func public @test_multiply(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK-LABEL: func.func public @test_multiply
// CHECK: ttnn.empty
// CHECK: ttnn.multiply
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: -> tensor<64x128xf32,
%0 = stablehlo.multiply %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}
}
21 changes: 21 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/remainder_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
// Silicon runtime test for elementwise remainder: lowered through the TTIR
// and TTNN pipelines, serialized to a flatbuffer, and verified with FileCheck.
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: rm -rf %t.mlir
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
// RUN: FileCheck --input-file=%t.mlir %s

module @jit_eltwise_remainder attributes {} {
// stablehlo.remainder on 64x128 f32 tensors must lower to a ttnn.remainder
// writing into a ttnn.empty-allocated output of the same shape.
func.func public @test_remainder(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK-LABEL: func.func public @test_remainder
// CHECK: ttnn.empty
// CHECK: ttnn.remainder
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: -> tensor<64x128xf32,
%0 = stablehlo.remainder %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}
}
21 changes: 21 additions & 0 deletions test/ttmlir/Silicon/StableHLO/Binary/subtract_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
// Silicon runtime test for elementwise subtract: lowered through the TTIR and
// TTNN pipelines, serialized to a flatbuffer, and verified with FileCheck.
// REQUIRES: stablehlo
// RUN: rm -rf %t.ttnn
// RUN: rm -rf %t.mlir
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
// RUN: FileCheck --input-file=%t.mlir %s

module @jit_eltwise_subtract attributes {} {
// stablehlo.subtract on 64x128 f32 tensors must lower to a ttnn.subtract
// writing into a ttnn.empty-allocated output of the same shape.
func.func public @test_subtract(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK-LABEL: func.func public @test_subtract
// CHECK: ttnn.empty
// CHECK: ttnn.subtract
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: -> tensor<64x128xf32,
%0 = stablehlo.subtract %arg0, %arg1 : tensor<64x128xf32>
return %0 : tensor<64x128xf32>
}
}
Loading

0 comments on commit 138ae23

Please sign in to comment.