Skip to content

Commit

Permalink
#13536: testing without 0-dim and 1D restrictions
Browse files Browse the repository at this point in the history
  • Loading branch information
KalaivaniMCW authored and VirdhatchaniKN committed Nov 5, 2024
1 parent 8967f69 commit 43a04bf
Show file tree
Hide file tree
Showing 16 changed files with 204 additions and 92 deletions.
60 changes: 42 additions & 18 deletions tests/sweep_framework/sweeps/eltwise/binary/add/add_all_pytorch2.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,20 +14,16 @@
from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time
from models.utility_functions import torch_random

# Override the default timeout in seconds for hang detection.
TIMEOUT = 30

random.seed(0)

# Parameters provided to the test vector generator are defined here.
# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values.
# Each suite has a key name (in this case "suite_1") which will associate the test vectors to this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
"nightly": {
"test_bcast_1d_13": {
"input_shape": [
{"self": [0, 1], "other": [0, 1]},
{"self": [0], "other": [0]},
# {"self": [0, 1], "other": [0, 1]}, #0 is not a valid shape
# {"self": [0], "other": [0]}, #0 is not a valid shape
{"self": [1, 1, 1024], "other": [1, 1, 1024]},
{"self": [1, 1, 16, 32], "other": [1, 1, 16, 32]},
{"self": [1, 1, 3072], "other": [1, 1, 3072]},
Expand Down Expand Up @@ -403,7 +399,7 @@
{"self": [8732, 1], "other": [8732, 1]},
{"self": [8732, 2], "other": [8732, 2]},
{"self": [8732], "other": [8732]},
{"self": [], "other": []},
# {"self": [], "other": []}, #without empty tensor
{"self": [920, 1, 256], "other": [256]},
{"self": [920, 1, 256], "other": [920, 1, 256]},
{"self": [1, 1, 1, 42], "other": -6.0},
Expand Down Expand Up @@ -504,7 +500,7 @@
{"self": [7], "other": 0.0},
{"self": [800], "other": 0.5},
{"self": [80], "other": 0.5},
{"self": [], "other": 1},
# {"self": [], "other": 1}, #without empty tensor
],
# {"self": [s0 + 1, s0 + 1], "other": 16},
# {"self": [s0 + 1, s0 + 1], "other": 0},
Expand Down Expand Up @@ -533,6 +529,24 @@
}


# Invalidate vector is called during the generation phase where each vector will be passed in.
# If invalidated, the vector will still be stored but will be skipped.
# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
# def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
# if isinstance(test_vector["input_shape"]["other"], list) and len(test_vector["input_shape"]["self"]) >= 4:
# is_less_than_3d = len(test_vector["input_shape"]["other"]) < 3
# c_index = test_vector["input_shape"]["self"][-3]
# if is_less_than_3d or (
# len(test_vector["input_shape"]["other"]) >= 3 and test_vector["input_shape"]["other"][-3] < c_index
# ):
# print("checking channel bcast")
# print("input ", test_vector["input_shape"]["self"])
# print("other ", test_vector["input_shape"]["other"])
# return True, "channel dim bcast not supported"

# return False, None


# This is the run instructions for the test, defined by the developer.
# The run function must take the above-defined parameters as inputs.
# The runner will call this run function with each test vector, and the returned results from this function will be stored.
Expand All @@ -548,19 +562,23 @@ def run(
*,
device,
) -> list:
data_seed = random.randint(0, 20000000)
torch.manual_seed(data_seed)
torch.manual_seed(0)

torch_input_tensor_a = gen_func_with_cast_tt(
partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype
partial(torch_random, low=-100, high=100, dtype=torch.bfloat16), input_a_dtype
)(input_shape["self"])

if isinstance(input_shape["other"], list):
torch_input_tensor_b = gen_func_with_cast_tt(
partial(torch_random, low=-100, high=100, dtype=torch.float32), input_b_dtype
)(input_shape["other"])
if len(input_shape["other"]):
torch_input_tensor_b = gen_func_with_cast_tt(
partial(torch_random, low=-100, high=100, dtype=torch.bfloat16), input_b_dtype
)(input_shape["other"])
else:
print("input shape", input_shape)
torch_input_tensor_b = torch.tensor(0, dtype=torch.bfloat16)
print("torch_input_tensor_b shape", torch_input_tensor_b)
else:
torch_input_tensor_b = torch.tensor(input_shape["other"], dtype=torch.float32)
torch_input_tensor_b = torch.tensor(input_shape["other"], dtype=torch.bfloat16)
# torch_input_tensor_b = input_shape["other"]

golden_function = ttnn.get_golden_function(ttnn.add)
Expand All @@ -587,7 +605,13 @@ def run(

start_time = start_measuring_time()
result = ttnn.add(input_tensor_a, input_tensor_b)
output_tensor = ttnn.to_torch(result)

# handles 1 D input_a and scalar or empty [] input_b
if len(input_shape["self"]) == 1 and (not isinstance(input_shape["other"], list) or not input_shape["other"]):
output_tensor = ttnn.to_torch(result, original_shape=input_shape["self"])
else:
output_tensor = ttnn.to_torch(result)

e2e_perf = stop_measuring_time(start_time)

return [check_with_pcc(torch_output_tensor, output_tensor, pcc=0.9999), e2e_perf]
return [check_with_pcc(torch_output_tensor, output_tensor, pcc=0.999), e2e_perf]
Original file line number Diff line number Diff line change
Expand Up @@ -6,22 +6,16 @@
from functools import partial

import torch
import random
import ttnn
from tests.sweep_framework.sweep_utils.utils import gen_shapes
from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt

from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time
from models.utility_functions import torch_random

# Override the default timeout in seconds for hang detection.
TIMEOUT = 30

random.seed(0)


parameters = {
"nightly": {
"nightly_14": {
"input_specs": [
{"shape": [0, 1], "other": 1.0},
{"shape": [1, 1, 16384, 256], "other": 5.656854249492381},
Expand Down Expand Up @@ -143,8 +137,7 @@ def run(
*,
device,
) -> list:
data_seed = random.randint(0, 20000000)
torch.manual_seed(data_seed)
torch.manual_seed(0)

input_shape = input_specs["shape"]
if len(input_shape) == 0:
Expand Down Expand Up @@ -186,7 +179,14 @@ def run(
start_time = start_measuring_time()

output_tensor = ttnn.divide(input_tensor_a, input_tensor_b, memory_config=output_memory_config)
output_tensor = ttnn.to_torch(output_tensor)

# handles 1 D input_a and scalar or empty [] input_b
if len(input_specs["shape"]) == 1 and (
not isinstance(input_specs["other"], list) or not input_specs["other"] or len(input_specs["other"]) == 1
):
output_tensor = ttnn.to_torch(output_tensor, original_shape=input_specs["shape"])
else:
output_tensor = ttnn.to_torch(output_tensor)

e2e_perf = stop_measuring_time(start_time)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
"nightly": {
"pass_1": {
"input_shape": [
[1, 1, 256],
[1, 16],
Expand Down Expand Up @@ -83,7 +83,11 @@ def run(

start_time = start_measuring_time()
output_tensor = ttnn.eq(input_tensor_a, scalar, memory_config=output_memory_config)
output_tensor = ttnn.to_torch(output_tensor)
# to handle 1D inputs giving 2D outputs
if len(input_shape) == 1:
output_tensor = ttnn.to_torch(output_tensor, original_shape=input_shape)
else:
output_tensor = ttnn.to_torch(output_tensor)
e2e_perf = stop_measuring_time(start_time)

return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf]
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
"nightly": {
"pass_1": {
"input_shape": [
[128],
],
Expand Down Expand Up @@ -91,7 +91,11 @@ def run(

start_time = start_measuring_time()
output_tensor = ttnn.floor_div(input_tensor_a, input_tensor_b, memory_config=output_memory_config)
output_tensor = ttnn.to_torch(output_tensor)
# to handle 1D inputs giving 2D outputs
if len(input_shape) == 1:
output_tensor = ttnn.to_torch(output_tensor, original_shape=input_shape)
else:
output_tensor = ttnn.to_torch(output_tensor)
e2e_perf = stop_measuring_time(start_time)

return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf]
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,11 @@
# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
"nightly": {
"pass_1": {
"input_shape": [
[10, 10],
[15, 15],
[],
[], # how should this be tested ?
],
"scalar": [0, 0],
"input_a_dtype": [ttnn.bfloat16],
Expand Down Expand Up @@ -74,7 +74,11 @@ def run(

start_time = start_measuring_time()
output_tensor = ttnn.gt(input_tensor_a, scalar, memory_config=output_memory_config)
output_tensor = ttnn.to_torch(output_tensor)
# to handle 1D inputs giving 2D outputs
if len(input_shape) == 1:
output_tensor = ttnn.to_torch(output_tensor, original_shape=input_shape)
else:
output_tensor = ttnn.to_torch(output_tensor)
e2e_perf = stop_measuring_time(start_time)

return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf]
Original file line number Diff line number Diff line change
Expand Up @@ -6,27 +6,22 @@
from functools import partial

import torch
import random
import ttnn
from tests.sweep_framework.sweep_utils.utils import gen_shapes
from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt

from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time
from models.utility_functions import torch_random

# Override the default timeout in seconds for hang detection.
TIMEOUT = 30

random.seed(0)

# Parameters provided to the test vector generator are defined here.
# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values.
# Each suite has a key name (in this case "suite_1") which will associate the test vectors to this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
"nightly": {
"test_bcast_1d_11": {
"input_shape": [
{"self": [0], "other": 0.5},
{"self": [0], "other": 0.5}, # is [0] a valid shape ?
{"self": [1, 1, 1, 10], "other": -3.4028234663852886e38},
{"self": [1, 1, 1, 12], "other": -3.4028234663852886e38},
{"self": [1, 1, 1, 14], "other": -3.4028234663852886e38},
Expand Down Expand Up @@ -191,8 +186,8 @@
{"self": [8732, 2], "other": 0.5},
{"self": [8732], "other": 0.5},
# vec other
{"self": [0, 1], "other": [0, 1]},
{"self": [0], "other": []},
# {"self": [0, 1], "other": [0, 1]},
{"self": [0], "other": []}, # invalid shape [0]
{"self": [1, 1, 1, 17], "other": [1, 1, 1, 17]},
{"self": [1, 1, 1, 1], "other": [1, 1, 1, 1]},
{"self": [1, 1, 1, 2], "other": [1, 1, 1, 2]},
Expand Down Expand Up @@ -412,6 +407,28 @@
}


# Invalidate vector is called during the generation phase where each vector will be passed in.
# If invalidated, the vector will still be stored but will be skipped.
# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
# def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
# if len(test_vector["input_shape"]["self"]) == 0 or (
# isinstance(test_vector["input_shape"]["other"], list) and len(test_vector["input_shape"]["other"]) == 0
# ):
# return True, "empty shape not supported"
# if isinstance(test_vector["input_shape"]["other"], list) and len(test_vector["input_shape"]["self"]) >= 4:
# is_less_than_3d = len(test_vector["input_shape"]["other"]) < 3
# c_index = test_vector["input_shape"]["self"][-3]
# if is_less_than_3d or (
# len(test_vector["input_shape"]["other"]) >= 3 and test_vector["input_shape"]["other"][-3] < c_index
# ):
# print("checking channel bcast")
# print("input ", test_vector["input_shape"]["self"])
# print("other ", test_vector["input_shape"]["other"])
# return True, "channel dim bcast not supported"

# return False, None


# This is the run instructions for the test, defined by the developer.
# The run function must take the above-defined parameters as inputs.
# The runner will call this run function with each test vector, and the returned results from this function will be stored.
Expand All @@ -427,19 +444,21 @@ def run(
*,
device,
) -> list:
data_seed = random.randint(0, 20000000)
torch.manual_seed(data_seed)
torch.manual_seed(0)

torch_input_tensor_a = gen_func_with_cast_tt(
partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype
partial(torch_random, low=-100, high=100, dtype=torch.bfloat16), input_a_dtype
)(input_shape["self"])

if isinstance(input_shape["other"], list):
torch_input_tensor_b = gen_func_with_cast_tt(
partial(torch_random, low=-100, high=100, dtype=torch.float32), input_b_dtype
)(input_shape["other"])
if len(input_shape["other"]):
torch_input_tensor_b = gen_func_with_cast_tt(
partial(torch_random, low=-100, high=100, dtype=torch.bfloat16), input_b_dtype
)(input_shape["other"])
else:
torch_input_tensor_b = torch.tensor(0, dtype=torch.bfloat16)
else:
torch_input_tensor_b = torch.tensor(input_shape["other"], dtype=torch.float32)
torch_input_tensor_b = torch.tensor(input_shape["other"], dtype=torch.bfloat16)
# torch_input_tensor_b = input_shape["other"]

golden_function = ttnn.get_golden_function(ttnn.mul)
Expand All @@ -466,7 +485,11 @@ def run(

start_time = start_measuring_time()
result = ttnn.mul(input_tensor_a, input_tensor_b)
output_tensor = ttnn.to_torch(result)
# handles 1 D input_a and scalar or empty [] input_b
if len(input_shape["self"]) == 1 and (not isinstance(input_shape["other"], list) or not input_shape["other"]):
output_tensor = ttnn.to_torch(result, original_shape=input_shape["self"])
else:
output_tensor = ttnn.to_torch(result)
e2e_perf = stop_measuring_time(start_time)

return [check_with_pcc(torch_output_tensor, output_tensor, pcc=0.99), e2e_perf]
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
"nightly": {
"pass_1": {
"input_shape": [
[1],
],
Expand Down Expand Up @@ -72,7 +72,11 @@ def run(

start_time = start_measuring_time()
output_tensor = ttnn.remainder(input_tensor_a, scalar, memory_config=output_memory_config)
output_tensor = ttnn.to_torch(output_tensor)
# to handle 1D inputs giving 2D outputs
if len(input_shape) == 1:
output_tensor = ttnn.to_torch(output_tensor, original_shape=input_shape)
else:
output_tensor = ttnn.to_torch(output_tensor)
e2e_perf = stop_measuring_time(start_time)

return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf]
Loading

0 comments on commit 43a04bf

Please sign in to comment.