Skip to content

Commit

Permalink
#4003: split unary sweep into per op sweeps
Browse files Browse the repository at this point in the history
  • Loading branch information
arakhmati committed Jan 25, 2024
1 parent 66d2a9b commit 5d6f52e
Show file tree
Hide file tree
Showing 5 changed files with 234 additions and 13 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -13,13 +13,6 @@


parameters = {
"ttnn_function,torch_function": [
(ttnn.exp, torch.exp),
(ttnn.tanh, torch.tanh),
(ttnn.gelu, torch.nn.functional.gelu),
(ttnn.rsqrt, torch.rsqrt),
(ttnn.relu, torch.relu),
],
"batch_sizes": [(1,)],
"height": [384, 1024],
"width": [1024, 4096],
Expand All @@ -38,8 +31,6 @@ def is_expected_to_fail(**_) -> Tuple[bool, Optional[str]]:


def run(
ttnn_function,
torch_function,
batch_sizes,
height,
width,
Expand All @@ -53,17 +44,15 @@ def run(

low = -0.1
high = 0.1
if ttnn_function in {ttnn.rsqrt}:
low = 0.0

torch_input_tensor = torch_random(input_shape, low, high, dtype=torch.float32)
torch_output_tensor = torch_function(torch_input_tensor)
torch_output_tensor = torch.exp(torch_input_tensor)

input_tensor = ttnn.from_torch(
torch_input_tensor, dtype=input_dtype, device=device, memory_config=input_memory_config
)

output_tensor = ttnn_function(input_tensor, memory_config=output_memory_config)
output_tensor = ttnn.exp(input_tensor, memory_config=output_memory_config)
output_tensor = ttnn.to_torch(output_tensor)

return check_with_pcc(torch_output_tensor, output_tensor, 0.999)
58 changes: 58 additions & 0 deletions tests/ttnn/sweep_tests/sweeps/gelu.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

from typing import Optional, Tuple

import torch

import ttnn

from tests.ttnn.utils_for_testing import check_with_pcc
from models.utility_functions import torch_random


# Parameter grid consumed by the sweep harness; the keys match the
# parameters of run() below, and each key lists the values to sweep over.
parameters = {
    "batch_sizes": [(1,)],
    "height": [384, 1024],
    "width": [1024, 4096],
    "input_dtype": [ttnn.bfloat16],
    "input_memory_config": [ttnn.DRAM_MEMORY_CONFIG],
    "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG],
}


def skip(**_) -> Tuple[bool, Optional[str]]:
    """Never skip: every parameter combination of this sweep is run."""
    reason: Optional[str] = None
    return False, reason


def is_expected_to_fail(**_) -> Tuple[bool, Optional[str]]:
    """Every parameter combination of this sweep is expected to pass."""
    expected_to_fail = False
    return expected_to_fail, None


def run(
    batch_sizes,
    height,
    width,
    input_dtype,
    input_memory_config,
    output_memory_config,
    *,
    device,
) -> Tuple[bool, Optional[str]]:
    """Compare ttnn.gelu against torch.nn.functional.gelu on one random input.

    Returns the (passed, message) pair produced by check_with_pcc at a
    0.999 PCC threshold.
    """
    shape = (*batch_sizes, height, width)

    # Small range around zero; presumably torch_random samples uniformly
    # in [low, high) — it is a project helper, confirm against its source.
    torch_input_tensor = torch_random(shape, -0.1, 0.1, dtype=torch.float32)
    expected = torch.nn.functional.gelu(torch_input_tensor)

    device_input = ttnn.from_torch(
        torch_input_tensor,
        dtype=input_dtype,
        device=device,
        memory_config=input_memory_config,
    )
    device_output = ttnn.gelu(device_input, memory_config=output_memory_config)
    actual = ttnn.to_torch(device_output)

    return check_with_pcc(expected, actual, 0.999)
58 changes: 58 additions & 0 deletions tests/ttnn/sweep_tests/sweeps/relu.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

from typing import Optional, Tuple

import torch

import ttnn

from tests.ttnn.utils_for_testing import check_with_pcc
from models.utility_functions import torch_random


# Parameter grid consumed by the sweep harness; the keys match the
# parameters of run() below, and each key lists the values to sweep over.
parameters = {
    "batch_sizes": [(1,)],
    "height": [384, 1024],
    "width": [1024, 4096],
    "input_dtype": [ttnn.bfloat16],
    "input_memory_config": [ttnn.DRAM_MEMORY_CONFIG],
    "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG],
}


def skip(**_) -> Tuple[bool, Optional[str]]:
    """Never skip: every parameter combination of this sweep is run."""
    reason: Optional[str] = None
    return False, reason


def is_expected_to_fail(**_) -> Tuple[bool, Optional[str]]:
    """Every parameter combination of this sweep is expected to pass."""
    expected_to_fail = False
    return expected_to_fail, None


def run(
    batch_sizes,
    height,
    width,
    input_dtype,
    input_memory_config,
    output_memory_config,
    *,
    device,
) -> Tuple[bool, Optional[str]]:
    """Compare ttnn.relu against torch.relu on one random input.

    Returns the (passed, message) pair produced by check_with_pcc at a
    0.999 PCC threshold.
    """
    shape = (*batch_sizes, height, width)

    # Small range around zero; presumably torch_random samples uniformly
    # in [low, high) — it is a project helper, confirm against its source.
    torch_input_tensor = torch_random(shape, -0.1, 0.1, dtype=torch.float32)
    expected = torch.relu(torch_input_tensor)

    device_input = ttnn.from_torch(
        torch_input_tensor,
        dtype=input_dtype,
        device=device,
        memory_config=input_memory_config,
    )
    device_output = ttnn.relu(device_input, memory_config=output_memory_config)
    actual = ttnn.to_torch(device_output)

    return check_with_pcc(expected, actual, 0.999)
58 changes: 58 additions & 0 deletions tests/ttnn/sweep_tests/sweeps/rsqrt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

from typing import Optional, Tuple

import torch

import ttnn

from tests.ttnn.utils_for_testing import check_with_pcc
from models.utility_functions import torch_random


# Parameter grid consumed by the sweep harness; the keys match the
# parameters of run() below, and each key lists the values to sweep over.
parameters = {
    "batch_sizes": [(1,)],
    "height": [384, 1024],
    "width": [1024, 4096],
    "input_dtype": [ttnn.bfloat16],
    "input_memory_config": [ttnn.DRAM_MEMORY_CONFIG],
    "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG],
}


def skip(**_) -> Tuple[bool, Optional[str]]:
    """Never skip: every parameter combination of this sweep is run."""
    reason: Optional[str] = None
    return False, reason


def is_expected_to_fail(**_) -> Tuple[bool, Optional[str]]:
    """Every parameter combination of this sweep is expected to pass."""
    expected_to_fail = False
    return expected_to_fail, None


def run(
    batch_sizes,
    height,
    width,
    input_dtype,
    input_memory_config,
    output_memory_config,
    *,
    device,
) -> Tuple[bool, Optional[str]]:
    """Compare ttnn.rsqrt against torch.rsqrt on one random non-negative input.

    Returns the (passed, message) pair produced by check_with_pcc at a
    0.999 PCC threshold.
    """
    shape = (*batch_sizes, height, width)

    # Non-negative range because rsqrt is undefined for negative inputs.
    # NOTE(review): a lower bound of 0 would make rsqrt(0) = inf if the
    # bound is attainable — confirm whether torch_random excludes `low`.
    torch_input_tensor = torch_random(shape, 0, 0.1, dtype=torch.float32)
    expected = torch.rsqrt(torch_input_tensor)

    device_input = ttnn.from_torch(
        torch_input_tensor,
        dtype=input_dtype,
        device=device,
        memory_config=input_memory_config,
    )
    device_output = ttnn.rsqrt(device_input, memory_config=output_memory_config)
    actual = ttnn.to_torch(device_output)

    return check_with_pcc(expected, actual, 0.999)
58 changes: 58 additions & 0 deletions tests/ttnn/sweep_tests/sweeps/tanh.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

from typing import Optional, Tuple

import torch

import ttnn

from tests.ttnn.utils_for_testing import check_with_pcc
from models.utility_functions import torch_random


# Parameter grid consumed by the sweep harness; the keys match the
# parameters of run() below, and each key lists the values to sweep over.
parameters = {
    "batch_sizes": [(1,)],
    "height": [384, 1024],
    "width": [1024, 4096],
    "input_dtype": [ttnn.bfloat16],
    "input_memory_config": [ttnn.DRAM_MEMORY_CONFIG],
    "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG],
}


def skip(**_) -> Tuple[bool, Optional[str]]:
    """Never skip: every parameter combination of this sweep is run."""
    reason: Optional[str] = None
    return False, reason


def is_expected_to_fail(**_) -> Tuple[bool, Optional[str]]:
    """Every parameter combination of this sweep is expected to pass."""
    expected_to_fail = False
    return expected_to_fail, None


def run(
    batch_sizes,
    height,
    width,
    input_dtype,
    input_memory_config,
    output_memory_config,
    *,
    device,
) -> Tuple[bool, Optional[str]]:
    """Compare ttnn.tanh against torch.tanh on one random input.

    Returns the (passed, message) pair produced by check_with_pcc at a
    0.999 PCC threshold.
    """
    shape = (*batch_sizes, height, width)

    # Small range around zero; presumably torch_random samples uniformly
    # in [low, high) — it is a project helper, confirm against its source.
    torch_input_tensor = torch_random(shape, -0.1, 0.1, dtype=torch.float32)
    expected = torch.tanh(torch_input_tensor)

    device_input = ttnn.from_torch(
        torch_input_tensor,
        dtype=input_dtype,
        device=device,
        memory_config=input_memory_config,
    )
    device_output = ttnn.tanh(device_input, memory_config=output_memory_config)
    actual = ttnn.to_torch(device_output)

    return check_with_pcc(expected, actual, 0.999)

0 comments on commit 5d6f52e

Please sign in to comment.