Refactoring.
uazizTT committed Oct 31, 2024
1 parent d694b1e commit b6f73a3
Showing 13 changed files with 20 additions and 21 deletions.
2 changes: 1 addition & 1 deletion include/ttmlir/Dialect/TTIR/IR/TTIROps.td
@@ -232,7 +232,7 @@ def TTIR_LogitOp: TTIR_ElementwiseUnaryOp<"logit"> {
}

def TTIR_TanOp: TTIR_ElementwiseUnaryOp<"tan"> {
let summary = "Eltwise logistic op.";
let summary = "Eltwise tan op.";
let description = [{
Eltwise tan operation.
}];
2 changes: 1 addition & 1 deletion include/ttmlir/Target/TTNN/program.fbs
@@ -86,7 +86,7 @@ enum EltwiseOpType: uint32 {
Ceil = 25,
Sin = 26,
Cos = 27,
-Logistic = 28,
+Logit = 28,
Tan = 29,
Tanh = 30,
Log = 31
2 changes: 1 addition & 1 deletion lib/Target/TTNN/TTNNToFlatbuffer.cpp
@@ -349,7 +349,7 @@ createEltwiseOp(FlatbufferObjectCache &cache, EltwiseOp op) {
} else if constexpr (std::is_same_v<EltwiseOp, SinOp>) {
type = ::tt::target::ttnn::EltwiseOpType::Sin;
} else if constexpr (std::is_same_v<EltwiseOp, LogitOp>) {
-type = ::tt::target::ttnn::EltwiseOpType::Logistic;
+type = ::tt::target::ttnn::EltwiseOpType::Logit;
} else if constexpr (std::is_same_v<EltwiseOp, TanOp>) {
type = ::tt::target::ttnn::EltwiseOpType::Tan;
} else if constexpr (std::is_same_v<EltwiseOp, TanhOp>) {
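Aside: the createEltwiseOp change above sits inside a chain of C++17 if constexpr / std::is_same_v branches that pick the flatbuffer enum value for each templated op class at compile time. A minimal, self-contained sketch of that dispatch pattern, using placeholder FooOp/BarOp tags and a simplified enum rather than the real TTNN types:

    #include <cstdint>
    #include <type_traits>

    // Placeholder op tags and target enum standing in for the real TTNN
    // op classes and ::tt::target::ttnn::EltwiseOpType.
    struct FooOp {};
    struct BarOp {};
    enum class EltwiseOpType : std::uint32_t { Foo = 0, Bar = 1 };

    // Dependent-false helper so the static_assert only fires for an
    // unhandled op type.
    template <typename>
    inline constexpr bool always_false_v = false;

    template <typename EltwiseOp>
    constexpr EltwiseOpType toEltwiseOpType() {
      if constexpr (std::is_same_v<EltwiseOp, FooOp>) {
        return EltwiseOpType::Foo;  // only this branch is instantiated for FooOp
      } else if constexpr (std::is_same_v<EltwiseOp, BarOp>) {
        return EltwiseOpType::Bar;
      } else {
        static_assert(always_false_v<EltwiseOp>, "unhandled eltwise op type");
      }
    }

    static_assert(toEltwiseOpType<BarOp>() == EltwiseOpType::Bar);

The static_assert fallback is part of this sketch only; it turns an unmapped op type into a compile-time error rather than a silently wrong enum value.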
1 change: 0 additions & 1 deletion runtime/include/tt/runtime/detail/ttnn.h
@@ -57,7 +57,6 @@
#include "ttnn/operations/eltwise/binary/binary.hpp"
#include "ttnn/operations/eltwise/binary/binary_composite.hpp"
#include "ttnn/operations/eltwise/unary/unary.hpp"
#include "ttnn/operations/eltwise/unary/unary_composite.hpp"
#include "ttnn/operations/embedding/embedding.hpp"
#include "ttnn/operations/matmul/matmul.hpp"
#include "ttnn/operations/normalization/softmax/softmax.hpp"
2 changes: 1 addition & 1 deletion runtime/lib/ttnn/operations/eltwise/unary.cpp
@@ -124,7 +124,7 @@ void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context) {
runEltwiseUnaryOP(op, tensorPool, ::ttnn::reciprocal);
break;
}
-case ::tt::target::ttnn::EltwiseOpType::Logistic: {
+case ::tt::target::ttnn::EltwiseOpType::Logit: {
runEltwiseUnaryOP(op, tensorPool, ::ttnn::sigmoid);
break;
}
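Aside: each case above hands the concrete TTNN callable (::ttnn::reciprocal, ::ttnn::sigmoid, ...) to a shared runEltwiseUnaryOP helper. The real helper and tensor-pool types are not part of this diff; a rough sketch of that shape, using hypothetical Tensor/TensorPool stand-ins, might look like:

    #include <cstdint>
    #include <unordered_map>
    #include <utility>

    // Hypothetical stand-ins for the runtime's tensor and tensor-pool types.
    struct Tensor {
      // device buffer handle, shape, dtype, ... elided
    };
    using TensorPool = std::unordered_map<std::uint32_t, Tensor>;

    // Generic unary runner: look up the input tensor by its global id,
    // apply the supplied unary callable, and register the result under
    // the output's global id.
    template <typename UnaryFn>
    void runEltwiseUnaryOp(std::uint32_t inputId, std::uint32_t outputId,
                           TensorPool &pool, UnaryFn &&fn) {
      const Tensor &input = pool.at(inputId);
      pool[outputId] = std::forward<UnaryFn>(fn)(input);
    }

    int main() {
      TensorPool pool;
      pool[0] = Tensor{};
      // Identity lambda as a placeholder for a real unary op callable.
      runEltwiseUnaryOp(0, 1, pool, [](const Tensor &t) { return t; });
    }

Note that only the case label changes in this hunk; the body still routes Logit through ::ttnn::sigmoid, consistent with a pure rename.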
@@ -1,7 +1,7 @@
// REQUIRES: stablehlo
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s
-module @jit_eltwise_logistic attributes {} {
-func.func public @test_logistic(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
+module @jit_eltwise_logit attributes {} {
+func.func public @test_logit(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
%0 = stablehlo.logistic %arg0 : tensor<13x21x3xf32>
// CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]]
// CHECK: [[VAL1:%[0-9]+]] = "ttir.logit"(%arg0, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]]
4 changes: 2 additions & 2 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ceil.mlir
@@ -5,9 +5,9 @@
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

func.func @ceil(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.ceil"[[C:.*]]
// CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
// CHECK: %{{[0-9]+}} = "ttnn.ceil"(%{{[0-9]+}}, [[VAL0]])
%1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
return %1 : tensor<64x128xf32>
}
4 changes: 2 additions & 2 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_cosine.mlir
@@ -5,9 +5,9 @@
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

func.func @cosine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.cos"[[C:.*]]
// CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
// CHECK: %{{[0-9]+}} = "ttnn.cos"(%{{[0-9]+}}, [[VAL0]])
%1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
return %1 : tensor<64x128xf32>
}
4 changes: 2 additions & 2 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_log.mlir
@@ -5,9 +5,9 @@
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

func.func @log(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.log"[[C:.*]]
// CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
// CHECK: %{{[0-9]+}} = "ttnn.log"(%{{[0-9]+}}, [[VAL0]])
%1 = "ttir.log"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
return %1 : tensor<64x128xf32>
}
4 changes: 2 additions & 2 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_logit.mlir
@@ -5,9 +5,9 @@
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

func.func @logit(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.logit"[[C:.*]]
// CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
// CHECK: %{{[0-9]+}} = "ttnn.logit"(%{{[0-9]+}}, [[VAL0]])
%1 = "ttir.logit"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
return %1 : tensor<64x128xf32>
}
4 changes: 2 additions & 2 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sine.mlir
@@ -5,9 +5,9 @@
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

func.func @sine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.sin"[[C:.*]]
// CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
// CHECK: %{{[0-9]+}} = "ttnn.sin"(%{{[0-9]+}}, [[VAL0]])
%1 = "ttir.sin"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
return %1 : tensor<64x128xf32>
}
4 changes: 2 additions & 2 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_tan.mlir
@@ -5,9 +5,9 @@
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

func.func @tan(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.tan"[[C:.*]]
// CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
// CHECK: %{{[0-9]+}} = "ttnn.tan"(%{{[0-9]+}}, [[VAL0]])
%1 = "ttir.tan"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
return %1 : tensor<64x128xf32>
}
4 changes: 2 additions & 2 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_tanh.mlir
@@ -5,9 +5,9 @@
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

func.func @tanh(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.tanh"[[C:.*]]
// CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
// CHECK: %{{[0-9]+}} = "ttnn.tanh"(%{{[0-9]+}}, [[VAL0]])
%1 = "ttir.tanh"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
return %1 : tensor<64x128xf32>
}
