diff --git a/tests/ttnn/python_api_testing/non_working_unit_tests/grayskull/test_eltwise_threshold.py b/tests/ttnn/python_api_testing/non_working_unit_tests/grayskull/test_eltwise_threshold.py
index 3ebda8b721d..ebded70c301 100644
--- a/tests/ttnn/python_api_testing/non_working_unit_tests/grayskull/test_eltwise_threshold.py
+++ b/tests/ttnn/python_api_testing/non_working_unit_tests/grayskull/test_eltwise_threshold.py
@@ -45,7 +45,7 @@ def run_eltwise_threshold_tests(
     assert tt_result.shape == ref_value.shape

     # compare tt and golden outputs
-    success, pcc_value = comp_pcc(ref_value, tt_result)
+    success, pcc_value = comp_pcc(ref_value, tt_result, pcc=0.97)
     logger.debug(pcc_value)
     logger.debug(success)

diff --git a/tests/ttnn/python_api_testing/non_working_unit_tests/grayskull/test_isclose.py b/tests/ttnn/python_api_testing/non_working_unit_tests/grayskull/test_isclose.py
index b027d1a5397..aabe08ca3fc 100644
--- a/tests/ttnn/python_api_testing/non_working_unit_tests/grayskull/test_isclose.py
+++ b/tests/ttnn/python_api_testing/non_working_unit_tests/grayskull/test_isclose.py
@@ -26,8 +26,8 @@ def run_isclose_tests(
     device,
 ):
     torch.manual_seed(data_seed)
-    x = torch.Tensor(size=input_shape[0]).uniform_(-100, 100)
-    y = torch.Tensor(size=input_shape[1]).uniform_(-200, 200)
+    x = torch.randn(input_shape[0])
+    y = torch.Tensor(size=input_shape[1]).uniform_(-100, 100)

     try:
         # get ref result
@@ -36,7 +36,7 @@ def run_isclose_tests(
         t0 = ttnn_ops.setup_ttnn_tensor(x, device, dlayout[0], in_mem_config[0], dtype[0])
         t1 = ttnn_ops.setup_ttnn_tensor(y, device, dlayout[1], in_mem_config[1], dtype[1])

-        t2 = ttnn.isclose(t0, t1, rtol, atol, memory_config=output_mem_config)
+        t2 = ttnn.isclose(t0, t1, rtol=rtol, atol=atol, equal_nan=equal_nan, memory_config=output_mem_config)

         tt_result = ttnn_ops.ttnn_tensor_to_torch(t2, output_mem_config)

@@ -58,18 +58,7 @@ def run_isclose_tests(

 test_sweep_args = [
     (
-        [(224, 128), (224, 128)],
-        [ttnn.bfloat16, ttnn.bfloat16],
-        [ttnn.TILE_LAYOUT, ttnn.TILE_LAYOUT],
-        [ttnn.DRAM_MEMORY_CONFIG, ttnn.DRAM_MEMORY_CONFIG],
-        ttnn.DRAM_MEMORY_CONFIG,
-        1.0011717677116394e-07,
-        9.968061931431293e-10,
-        False,
-        8687804,
-    ),
-    (
-        [(224, 128), (224, 128)],
+        [(1, 1, 224, 128), (1, 1, 224, 128)],
         [ttnn.bfloat16, ttnn.bfloat8_b],
         [ttnn.TILE_LAYOUT, ttnn.TILE_LAYOUT],
         [ttnn.DRAM_MEMORY_CONFIG, ttnn.DRAM_MEMORY_CONFIG],
diff --git a/tt_eager/tt_dnn/op_library/composite/composite_ops.cpp b/tt_eager/tt_dnn/op_library/composite/composite_ops.cpp
index f9beaa14c8f..ae9cdc2d97a 100644
--- a/tt_eager/tt_dnn/op_library/composite/composite_ops.cpp
+++ b/tt_eager/tt_dnn/op_library/composite/composite_ops.cpp
@@ -27,7 +27,7 @@ namespace tt_metal {

 Tensor mk_zero_tensor_like(const Tensor& reference_tensor, const MemoryConfig& output_mem_config) {
     // Tensor zero_like = bcast(reference_tensor, , BcastOpMath::MUL, BcastOpDim::HW);
-    static const Tensor zero = mk_tiled_scalar(0.0f, reference_tensor.get_dtype());
+    Tensor zero = mk_tiled_scalar(0.0f, reference_tensor.get_dtype());
     Tensor zero_like = bcast(reference_tensor, zero, BcastOpMath::MUL, BcastOpDim::HW, output_mem_config);
     return zero_like;
 }
@@ -118,7 +118,9 @@ Tensor _softplus(const Tensor& x, float beta, float threshold, const MemoryConfi
     Tensor x_beta = mul_unary(x, beta, output_mem_config);
     Tensor exp_x = exp(x_beta, output_mem_config);
     Tensor result_log1p = log1p(exp_x, output_mem_config);
+    exp_x.deallocate();
     Tensor sp_result = mul_unary(result_log1p, oned_beta, output_mem_config);
+    result_log1p.deallocate();
     sp_result = where(gt(x_beta, full_like(x, threshold, output_mem_config), std::nullopt, output_mem_config), x,
         where(eqz(full_like(x, beta, output_mem_config), output_mem_config), std::numeric_limits<float>::infinity(), sp_result), output_mem_config);
     return sp_result;
diff --git a/tt_eager/tt_dnn/op_library/eltwise_unary/eltwise_unary_op.cpp b/tt_eager/tt_dnn/op_library/eltwise_unary/eltwise_unary_op.cpp
index 6ee102afbe5..a8183ff8881 100644
--- a/tt_eager/tt_dnn/op_library/eltwise_unary/eltwise_unary_op.cpp
+++ b/tt_eager/tt_dnn/op_library/eltwise_unary/eltwise_unary_op.cpp
@@ -383,8 +383,8 @@ const operation::Hash EltwiseUnary::compute_program_hash(const std::vector
 Tensor tie_binop_to_unary(const Tensor& input_tensor, float value, const MemoryConfig& output_mem_config) {
-    Tensor t_value = mk_tiled_scalar(value, input_tensor.get_dtype());
-    return bcast(input_tensor, t_value, OP, BcastOpDim::HW);
+    Tensor t_value = mk_tiled_scalar(value, input_tensor.get_dtype());
+    return bcast(input_tensor, t_value, OP, BcastOpDim::HW);
 }

 Tensor lte_unary(const Tensor& input_tensor, float value, const MemoryConfig& output_mem_config) {
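
A minimal standalone sketch (not part of the diff) of how the keyword-argument ttnn.isclose call introduced above can be exercised against a torch golden reference. The helper name check_isclose, the shape, and the tolerances are illustrative assumptions; it relies on the standard ttnn.from_torch / ttnn.to_torch converters and an already-open device handle.

# Hypothetical usage sketch; not taken from the patched test file.
import torch
import ttnn


def check_isclose(device, rtol=1e-5, atol=1e-8, equal_nan=False):
    torch.manual_seed(0)
    x = torch.randn(1, 1, 224, 128)
    y = x + atol / 2  # perturb within the absolute tolerance (fp32; bfloat16 rounding may still differ)

    # golden reference on host
    ref = torch.isclose(x, y, rtol=rtol, atol=atol, equal_nan=equal_nan)

    # device computation with explicit keyword arguments, as in the updated test
    t0 = ttnn.from_torch(x, dtype=ttnn.bfloat16, layout=ttnn.TILE_LAYOUT, device=device,
                         memory_config=ttnn.DRAM_MEMORY_CONFIG)
    t1 = ttnn.from_torch(y, dtype=ttnn.bfloat16, layout=ttnn.TILE_LAYOUT, device=device,
                         memory_config=ttnn.DRAM_MEMORY_CONFIG)
    t2 = ttnn.isclose(t0, t1, rtol=rtol, atol=atol, equal_nan=equal_nan,
                      memory_config=ttnn.DRAM_MEMORY_CONFIG)

    return ref, ttnn.to_torch(t2)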