From 82714614db1ef1e01b4e9819214f58a14f0a2515 Mon Sep 17 00:00:00 2001
From: VirdhatchaniKN
Date: Tue, 5 Nov 2024 12:06:51 +0000
Subject: [PATCH] #14710: Subtract op Sweep for failing cases

---
 .github/workflows/ttnn-run-sweeps.yaml       |   1 +
 .../binary/subtract/subtract_tensor_fails.py | 104 ++++++++++++++++++
 2 files changed, 105 insertions(+)
 create mode 100644 tests/sweep_framework/sweeps/eltwise/binary/subtract/subtract_tensor_fails.py

diff --git a/.github/workflows/ttnn-run-sweeps.yaml b/.github/workflows/ttnn-run-sweeps.yaml
index eb5daaf2471..19ce0783ad1 100644
--- a/.github/workflows/ttnn-run-sweeps.yaml
+++ b/.github/workflows/ttnn-run-sweeps.yaml
@@ -195,6 +195,7 @@ on:
           - eltwise.unary_complex.angle_bw.angle_bw
           - eltwise.binary.subtract.subtract
           - eltwise.binary.subtract.subtract_tensor_pytorch2
+          - eltwise.binary.subtract.subtract_tensor_fails
           - eltwise.binary.multiply.multiply
           - eltwise.binary.multiply.mul_tensor_pytorch2
           - eltwise.binary.multiply.multiply_scalar_pytorch2
diff --git a/tests/sweep_framework/sweeps/eltwise/binary/subtract/subtract_tensor_fails.py b/tests/sweep_framework/sweeps/eltwise/binary/subtract/subtract_tensor_fails.py
new file mode 100644
index 00000000000..f50389c2c38
--- /dev/null
+++ b/tests/sweep_framework/sweeps/eltwise/binary/subtract/subtract_tensor_fails.py
@@ -0,0 +1,104 @@
+# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc.
+
+# SPDX-License-Identifier: Apache-2.0
+
+from typing import Optional, Tuple
+from functools import partial
+
+import torch
+import random
+import ttnn
+from tests.sweep_framework.sweep_utils.utils import gen_shapes
+from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt
+
+from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time
+from models.utility_functions import torch_random
+
+TIMEOUT = 30
+
+random.seed(0)
+
+
+parameters = {
+    "nightly": {
+        "input_specs": [
+            {"shape": [0, 1], "other": [0, 1]},
+            {"shape": [0], "other": [0]},
+            {"shape": [1, 10], "other": [10, 1]},
+            {"shape": [1, 15], "other": [15, 1]},
+            {"shape": [1, 17], "other": [17, 1]},
+            {"shape": [1, 2], "other": [2, 1]},
+            {"shape": [16, 1, 49], "other": [16, 49, 1]},
+            {"shape": [16, 1, 64], "other": [16, 64, 1]},
+            {"shape": [24, 1], "other": [1, 24]},
+            {"shape": [4, 1, 49], "other": [4, 49, 1]},
+            {"shape": [4, 1, 64], "other": [4, 64, 1]},
+            {"shape": [64, 1, 49], "other": [64, 49, 1]},
+            {"shape": [64, 1, 64], "other": [64, 64, 1]},
+        ],
+        "input_a_dtype": [ttnn.bfloat16],
+        "input_b_dtype": [ttnn.bfloat16],
+        "input_a_layout": [ttnn.TILE_LAYOUT],
+        "input_b_layout": [ttnn.TILE_LAYOUT],
+        "input_a_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG],
+        "input_b_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG],
+        "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG],
+    },
+}
+
+
+def run(
+    input_specs,
+    input_a_dtype,
+    input_b_dtype,
+    input_a_layout,
+    input_b_layout,
+    input_a_memory_config,
+    input_b_memory_config,
+    output_memory_config,
+    *,
+    device,
+) -> list:
+    data_seed = random.randint(0, 20000000)
+    torch.manual_seed(data_seed)
+
+    input_shape = input_specs["shape"]
+    torch_input_tensor_a = gen_func_with_cast_tt(
+        partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype
+    )(input_shape)
+
+    other = input_specs["other"]
+    if isinstance(other, (int, float)):
+        torch_other_tensor = torch.tensor(other, dtype=torch.float32)
+    else:
+        torch_other_tensor = gen_func_with_cast_tt(
+            partial(torch_random, low=-100, high=100, dtype=torch.float32), input_b_dtype
+        )(other)
+
+    golden_function = ttnn.get_golden_function(ttnn.sub)
+    torch_output_tensor = golden_function(torch_input_tensor_a, torch_other_tensor)
+
+    input_tensor_a = ttnn.from_torch(
+        torch_input_tensor_a,
+        dtype=input_a_dtype,
+        layout=input_a_layout,
+        device=device,
+        memory_config=input_a_memory_config,
+    )
+
+    input_tensor_b = ttnn.from_torch(
+        torch_other_tensor,
+        dtype=input_b_dtype,
+        layout=input_b_layout,
+        device=device,
+        memory_config=input_b_memory_config,
+    )
+
+    start_time = start_measuring_time()
+
+    output_tensor = ttnn.subtract(input_tensor_a, input_tensor_b, memory_config=output_memory_config)
+    output_tensor = ttnn.to_torch(output_tensor)
+
+    e2e_perf = stop_measuring_time(start_time)
+
+    return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf]
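
Note: the shape pairs in "input_specs" are broadcast (or zero-element) combinations, and the golden reference obtained from ttnn.get_golden_function(ttnn.sub) is expected to reduce to plain torch subtraction. The following is a minimal, framework-independent sketch of that golden-side behavior, using only PyTorch and a few of the shapes copied from the suite above (bfloat16 mirrors the configured dtypes); it does not invoke ttnn, the device, or the sweep runner.

import torch

# Sketch only: reproduce the golden-side broadcast shapes for a subset of the
# input_specs entries in the patch above. No ttnn / device code is involved.
specs = [
    {"shape": [0, 1], "other": [0, 1]},            # zero-element tensors
    {"shape": [1, 10], "other": [10, 1]},          # broadcasts to [10, 10]
    {"shape": [16, 1, 49], "other": [16, 49, 1]},  # broadcasts to [16, 49, 49]
    {"shape": [24, 1], "other": [1, 24]},          # broadcasts to [24, 24]
]

for spec in specs:
    a = torch.randn(spec["shape"], dtype=torch.bfloat16)
    b = torch.randn(spec["other"], dtype=torch.bfloat16)
    golden = a - b  # torch broadcasting defines the reference output shape
    print(spec["shape"], "-", spec["other"], "->", list(golden.shape))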