#4433: Refactor all backward test files
	- Fix sqrt bw to handle nan
	- Refactor tan bw op
umadevimcw committed Dec 26, 2023
1 parent 1aeb884 commit 8a04f8a
Showing 41 changed files with 298 additions and 850 deletions.
@@ -5,10 +5,7 @@
 import torch
 import pytest
 import tt_lib
-from tests.tt_eager.python_api_testing.sweep_tests import (
-    comparison_funcs,
-)
-from loguru import logger
+from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import *


 @pytest.mark.parametrize(
@@ -20,29 +17,19 @@
     ),
 )
 def test_bw_abs(input_shapes, device):
-    torch.manual_seed(12386)
-    in_data = torch.randn(input_shapes, requires_grad=True).bfloat16()
-    grad_data = torch.randn(input_shapes).bfloat16()
-
-    grad_tensor = (
-        tt_lib.tensor.Tensor(grad_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
-
-    input_tensor = (
-        tt_lib.tensor.Tensor(in_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
+    in_data, input_tensor = data_gen_pt_tt(input_shapes, device, True)
+    grad_data, grad_tensor = data_gen_pt_tt(input_shapes, device)

     pyt_y = torch.abs(in_data)

     tt_output_tensor_on_device = tt_lib.tensor.abs_bw(grad_tensor, input_tensor)
-    tt_output_tensor = tt_output_tensor_on_device[0].cpu().to(tt_lib.tensor.Layout.ROW_MAJOR).to_torch()

     in_data.retain_grad()

     pyt_y.backward(gradient=grad_data)

-    golden_output_tensor = in_data.grad
+    golden_tensor = list()
+    golden_tensor.append(in_data.grad)

-    comp_pass, comp_out = comparison_funcs.comp_pcc(golden_output_tensor, tt_output_tensor)
-    logger.info(comp_out)
+    comp_pass = compare_results(tt_output_tensor_on_device, golden_tensor)
     assert comp_pass
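
Every refactored test now leans on data_gen_pt_tt and compare_results from backward_ops/utility_funcs.py, whose bodies are not part of this diff. The sketch below is only an illustration of what such helpers could look like, inferred from the call sites above and from the boilerplate the refactor removes; the argument names and the default comparison function are assumptions, not the actual utility_funcs.py implementation.

import torch
import tt_lib
from tests.tt_eager.python_api_testing.sweep_tests import comparison_funcs


def data_gen_pt_tt(input_shapes, device, required_grad=False):
    # Random bfloat16 torch tensor plus its tiled copy on the tt device.
    pt_tensor = torch.randn(input_shapes, requires_grad=required_grad).bfloat16()
    tt_tensor = (
        tt_lib.tensor.Tensor(pt_tensor, tt_lib.tensor.DataType.BFLOAT16)
        .to(tt_lib.tensor.Layout.TILE)
        .to(device)
    )
    return pt_tensor, tt_tensor


def compare_results(tt_tensors, golden_tensors, comparison_func=comparison_funcs.comp_pcc):
    # Move each device output back to a row-major torch tensor and compare it
    # against the matching golden gradient; all pairs must pass.
    status = True
    for tt_tensor, golden in zip(tt_tensors, golden_tensors):
        pt_out = tt_tensor.cpu().to(tt_lib.tensor.Layout.ROW_MAJOR).to_torch()
        passing, _ = comparison_func(golden, pt_out)
        status = status and passing
    return status

Passing True as the third argument is what keeps the torch-side input differentiable, so in_data.retain_grad() and pyt_y.backward(...) in the tests can still populate the golden gradients.
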
@@ -6,10 +6,6 @@
 import pytest
 import tt_lib
 from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import *
-from tests.tt_eager.python_api_testing.sweep_tests import (
-    comparison_funcs,
-)
-from loguru import logger


 @pytest.mark.parametrize(
@@ -21,9 +17,9 @@
     ),
 )
 def test_bw_add(input_shapes, device):
-    in_data, input_tensor = bw_data_gen(input_shapes, device, True)
-    other_data, other_tensor = bw_data_gen(input_shapes, device, True)
-    grad_data, grad_tensor = bw_data_gen(input_shapes, device)
+    in_data, input_tensor = data_gen_pt_tt(input_shapes, device, True)
+    other_data, other_tensor = data_gen_pt_tt(input_shapes, device, True)
+    grad_data, grad_tensor = data_gen_pt_tt(input_shapes, device)

     tt_output_tensor_on_device = tt_lib.tensor.add_bw(grad_tensor, input_tensor, other_tensor)

@@ -38,5 +34,5 @@ def test_bw_add(input_shapes, device):
     golden_tensor.append(in_data.grad)
     golden_tensor.append(other_data.grad)

-    status = compare_results(tt_output_tensor_on_device, golden_tensor, comparison_funcs.comp_pcc)
+    status = compare_results(tt_output_tensor_on_device, golden_tensor)
     assert status
@@ -5,10 +5,7 @@
 import torch
 import pytest
 import tt_lib
-from tests.tt_eager.python_api_testing.sweep_tests import (
-    comparison_funcs,
-)
-from loguru import logger
+from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import *


 @pytest.mark.parametrize(
@@ -21,27 +18,11 @@
 )
 @pytest.mark.parametrize("alpha", [0.05, 1.0, 0.5, 0.12])
 def test_bw_addalpha(input_shapes, alpha, device):
-    torch.manual_seed(0)
-    in_data = torch.randn(input_shapes, requires_grad=True).bfloat16()
-    other_data = torch.randn(input_shapes, requires_grad=True).bfloat16()
-
-    grad_data = torch.randn(input_shapes).bfloat16()
-
-    grad_tensor = (
-        tt_lib.tensor.Tensor(grad_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
-
-    input_tensor = (
-        tt_lib.tensor.Tensor(in_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
-
-    other_tensor = (
-        tt_lib.tensor.Tensor(other_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
+    in_data, input_tensor = data_gen_pt_tt(input_shapes, device, True)
+    other_data, other_tensor = data_gen_pt_tt(input_shapes, device, True)
+    grad_data, grad_tensor = data_gen_pt_tt(input_shapes, device)

     tt_output_tensor_on_device = tt_lib.tensor.addalpha_bw(grad_tensor, input_tensor, other_tensor, alpha)
-    tt_output_tensor_a = tt_output_tensor_on_device[0].cpu().to(tt_lib.tensor.Layout.ROW_MAJOR).to_torch()
-    tt_output_tensor_b = tt_output_tensor_on_device[1].cpu().to(tt_lib.tensor.Layout.ROW_MAJOR).to_torch()

     in_data.retain_grad()
     other_data.retain_grad()
@@ -50,12 +31,9 @@ def test_bw_addalpha(input_shapes, alpha, device):

     pyt_y.backward(gradient=grad_data)

-    golden_output_tensor_a = in_data.grad
-    golden_output_tensor_b = other_data.grad
-
-    comp_pass_a, comp_out_a = comparison_funcs.comp_pcc(golden_output_tensor_a, tt_output_tensor_a)
-    comp_pass_b, comp_out_b = comparison_funcs.comp_pcc(golden_output_tensor_b, tt_output_tensor_b)
+    golden_tensor = list()
+    golden_tensor.append(in_data.grad)
+    golden_tensor.append(other_data.grad)

-    logger.info(comp_out_a)
-    logger.info(comp_out_b)
-    assert comp_pass_a & comp_pass_b
+    status = compare_results(tt_output_tensor_on_device, golden_tensor)
+    assert status
@@ -5,10 +5,7 @@
 import torch
 import pytest
 import tt_lib
-from tests.tt_eager.python_api_testing.sweep_tests import (
-    comparison_funcs,
-)
-from loguru import logger
+from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import *


 @pytest.mark.parametrize(
@@ -21,34 +18,15 @@
 )
 @pytest.mark.parametrize("value", [0.05, 1.0, 0.5, 5.0])
 def test_bw_addcdiv(input_shapes, value, device):
-    torch.manual_seed(0)
-    in_data = torch.randn(input_shapes, requires_grad=True).bfloat16()
-    tensor1_data = torch.randn(input_shapes, requires_grad=True).bfloat16()
-    tensor2_data = torch.randn(input_shapes, requires_grad=True).bfloat16()
-
-    grad_data = torch.randn(input_shapes).bfloat16()
-
-    grad_tensor = (
-        tt_lib.tensor.Tensor(grad_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
-
-    input_tensor = (
-        tt_lib.tensor.Tensor(in_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
-
-    tensor1_tensor = (
-        tt_lib.tensor.Tensor(tensor1_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
-    tensor2_tensor = (
-        tt_lib.tensor.Tensor(tensor2_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
+    in_data, input_tensor = data_gen_pt_tt(input_shapes, device, True)
+    tensor1_data, tensor1_tensor = data_gen_pt_tt(input_shapes, device, True)
+    tensor2_data, tensor2_tensor = data_gen_pt_tt(input_shapes, device, True)
+
+    grad_data, grad_tensor = data_gen_pt_tt(input_shapes, device, False)

     tt_output_tensor_on_device = tt_lib.tensor.addcdiv_bw(
         grad_tensor, input_tensor, tensor1_tensor, tensor2_tensor, value
     )
-    tt_output_tensor_a = tt_output_tensor_on_device[0].cpu().to(tt_lib.tensor.Layout.ROW_MAJOR).to_torch()
-    tt_output_tensor_b = tt_output_tensor_on_device[1].cpu().to(tt_lib.tensor.Layout.ROW_MAJOR).to_torch()
-    tt_output_tensor_c = tt_output_tensor_on_device[2].cpu().to(tt_lib.tensor.Layout.ROW_MAJOR).to_torch()

     in_data.retain_grad()
     tensor1_data.retain_grad()
@@ -58,15 +36,10 @@ def test_bw_addcdiv(input_shapes, value, device):

     pyt_y.backward(gradient=grad_data)

-    golden_output_tensor_a = in_data.grad
-    golden_output_tensor_b = tensor1_data.grad
-    golden_output_tensor_c = tensor2_data.grad
-
-    comp_pass_a, comp_out_a = comparison_funcs.comp_pcc(golden_output_tensor_a, tt_output_tensor_a)
-    comp_pass_b, comp_out_b = comparison_funcs.comp_pcc(golden_output_tensor_b, tt_output_tensor_b)
-    comp_pass_c, comp_out_c = comparison_funcs.comp_pcc(golden_output_tensor_c, tt_output_tensor_c)
+    golden_tensor = list()
+    golden_tensor.append(in_data.grad)
+    golden_tensor.append(tensor1_data.grad)
+    golden_tensor.append(tensor2_data.grad)

-    logger.info(comp_out_a)
-    logger.info(comp_out_b)
-    logger.info(comp_out_c)
-    assert comp_pass_a & comp_pass_b & comp_pass_c
+    status = compare_results(tt_output_tensor_on_device, golden_tensor)
+    assert status
@@ -5,10 +5,7 @@
 import torch
 import pytest
 import tt_lib
-from tests.tt_eager.python_api_testing.sweep_tests import (
-    comparison_funcs,
-)
-from loguru import logger
+from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import *


 @pytest.mark.parametrize(
@@ -21,34 +18,15 @@
 )
 @pytest.mark.parametrize("value", [0.05, 1.0, 0.5, 0.12])
 def test_bw_addcmul(input_shapes, value, device):
-    torch.manual_seed(0)
-    in_data = torch.randn(input_shapes, requires_grad=True).bfloat16()
-    tensor1_data = torch.randn(input_shapes, requires_grad=True).bfloat16()
-    tensor2_data = torch.randn(input_shapes, requires_grad=True).bfloat16()
-
-    grad_data = torch.randn(input_shapes).bfloat16()
-
-    grad_tensor = (
-        tt_lib.tensor.Tensor(grad_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
-
-    input_tensor = (
-        tt_lib.tensor.Tensor(in_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
-
-    tensor1_tensor = (
-        tt_lib.tensor.Tensor(tensor1_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
-    tensor2_tensor = (
-        tt_lib.tensor.Tensor(tensor2_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
+    in_data, input_tensor = data_gen_pt_tt(input_shapes, device, True)
+    tensor1_data, tensor1_tensor = data_gen_pt_tt(input_shapes, device, True)
+    tensor2_data, tensor2_tensor = data_gen_pt_tt(input_shapes, device, True)
+
+    grad_data, grad_tensor = data_gen_pt_tt(input_shapes, device)

     tt_output_tensor_on_device = tt_lib.tensor.addcmul_bw(
         grad_tensor, input_tensor, tensor1_tensor, tensor2_tensor, value
     )
-    tt_output_tensor_a = tt_output_tensor_on_device[0].cpu().to(tt_lib.tensor.Layout.ROW_MAJOR).to_torch()
-    tt_output_tensor_b = tt_output_tensor_on_device[1].cpu().to(tt_lib.tensor.Layout.ROW_MAJOR).to_torch()
-    tt_output_tensor_c = tt_output_tensor_on_device[2].cpu().to(tt_lib.tensor.Layout.ROW_MAJOR).to_torch()

     in_data.retain_grad()
     tensor1_data.retain_grad()
@@ -58,15 +36,10 @@ def test_bw_addcmul(input_shapes, value, device):

     pyt_y.backward(gradient=grad_data)

-    golden_output_tensor_a = in_data.grad
-    golden_output_tensor_b = tensor1_data.grad
-    golden_output_tensor_c = tensor2_data.grad
-
-    comp_pass_a, comp_out_a = comparison_funcs.comp_pcc(golden_output_tensor_a, tt_output_tensor_a)
-    comp_pass_b, comp_out_b = comparison_funcs.comp_pcc(golden_output_tensor_b, tt_output_tensor_b)
-    comp_pass_c, comp_out_c = comparison_funcs.comp_pcc(golden_output_tensor_c, tt_output_tensor_c)
+    golden_tensor = list()
+    golden_tensor.append(in_data.grad)
+    golden_tensor.append(tensor1_data.grad)
+    golden_tensor.append(tensor2_data.grad)

-    logger.info(comp_out_a)
-    logger.info(comp_out_b)
-    logger.info(comp_out_c)
-    assert comp_pass_a & comp_pass_b & comp_pass_c
+    status = compare_results(tt_output_tensor_on_device, golden_tensor)
+    assert status
@@ -5,10 +5,7 @@
 import torch
 import pytest
 import tt_lib
-from tests.tt_eager.python_api_testing.sweep_tests import (
-    comparison_funcs,
-)
-from loguru import logger
+from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import *


 @pytest.mark.parametrize(
@@ -20,34 +17,20 @@
     ),
 )
 def test_bw_binary_assign(input_shapes, device):
-    torch.manual_seed(0)
-    in_data = torch.randn(input_shapes, requires_grad=True).bfloat16()
-    other_data = torch.randn(input_shapes, requires_grad=True).bfloat16()
-    grad_data = torch.randn(input_shapes).bfloat16()
-
-    grad_tensor = (
-        tt_lib.tensor.Tensor(grad_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
+    in_data, input_tensor = data_gen_pt_tt(input_shapes, device, True)
+    other_data, other_tensor = data_gen_pt_tt(input_shapes, device, True)

-    input_tensor = (
-        tt_lib.tensor.Tensor(in_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
-
-    other_tensor = (
-        tt_lib.tensor.Tensor(other_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
+    grad_data, grad_tensor = data_gen_pt_tt(input_shapes, device)

     tt_output_tensor_on_device = tt_lib.tensor.binary_assign_bw(grad_tensor, input_tensor, other_tensor)
-    tt_output_tensor = tt_output_tensor_on_device[0].cpu().to(tt_lib.tensor.Layout.ROW_MAJOR).to_torch()

     in_data.retain_grad()

     pyt_y = torch.clone(in_data)

     pyt_y.backward(gradient=grad_data)

-    golden_output_tensor = in_data.grad
-
-    comp_pass, comp_out = comparison_funcs.comp_equal(golden_output_tensor, tt_output_tensor)
-    logger.info(comp_out)
-    assert comp_pass
+    golden_tensor = list()
+    golden_tensor.append(in_data.grad)
+    status = compare_results(tt_output_tensor_on_device, golden_tensor)
+    assert status
@@ -5,10 +5,7 @@
 import torch
 import pytest
 import tt_lib
-from tests.tt_eager.python_api_testing.sweep_tests import (
-    comparison_funcs,
-)
-from loguru import logger
+from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import *


 @pytest.mark.parametrize(
@@ -20,27 +17,13 @@
     ),
 )
 def test_bw_binary_le(input_shapes, device):
-    torch.manual_seed(12386)
-    in_data = torch.randn(input_shapes, requires_grad=True).bfloat16()
-    grad_data = torch.randn(input_shapes).bfloat16()
-
-    grad_tensor = (
-        tt_lib.tensor.Tensor(grad_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
-
-    input_tensor = (
-        tt_lib.tensor.Tensor(in_data, tt_lib.tensor.DataType.BFLOAT16).to(tt_lib.tensor.Layout.TILE).to(device)
-    )
+    in_data, input_tensor = data_gen_pt_tt(input_shapes, device, True)
+    grad_data, grad_tensor = data_gen_pt_tt(input_shapes, device)

     tt_output_tensor_on_device = tt_lib.tensor.binary_le_bw(grad_tensor, input_tensor)
-    pyt_y = torch.zeros_like(grad_data)
-
-    tt_output_tensor_1 = tt_output_tensor_on_device[0].cpu().to(tt_lib.tensor.Layout.ROW_MAJOR).to_torch()
-    tt_output_tensor_2 = tt_output_tensor_on_device[1].cpu().to(tt_lib.tensor.Layout.ROW_MAJOR).to_torch()
-
-    golden_output_tensor = pyt_y
-
-    comp_pass, comp_out = comparison_funcs.comp_pcc(golden_output_tensor, tt_output_tensor_1)
-    comp_pass, comp_out = comparison_funcs.comp_pcc(golden_output_tensor, tt_output_tensor_2)
-    logger.info(comp_out)
+    pt_y = torch.zeros_like(grad_data)
+    golden_tensor = list()
+    golden_tensor.append(pt_y)
+    golden_tensor.append(pt_y)
+    comp_pass = compare_results(tt_output_tensor_on_device, golden_tensor)
     assert comp_pass
