Skip to content

Commit

Permalink
#3932: Rename unary op args which were input_a -> input; binary op args from input, other -> input_a, input_b
Browse files Browse the repository at this point in the history
  • Loading branch information
tt-aho committed Dec 6, 2023
1 parent 2ab4da8 commit ff38868
Show file tree
Hide file tree
Showing 8 changed files with 83 additions and 83 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -312,7 +312,7 @@ def test_primary_moreh_matmul(params, device):
cpu_layout = ttl.tensor.Layout.ROW_MAJOR
tt_output = (
ttl.operations.primary.moreh_matmul(
tt_input, tt_other, transpose_input=transpose_input, transpose_other=transpose_other
tt_input, tt_other, transpose_input_a=transpose_input, transpose_input_b=transpose_other
)
.cpu()
.to(cpu_layout)
Expand Down
16 changes: 8 additions & 8 deletions tt_eager/tt_lib/csrc/operations/primary/module.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -343,12 +343,12 @@ void py_module(py::module& m_primary) {
m_primary.def(
"moreh_matmul",
&moreh_matmul,
py::arg("input_tensor").noconvert(),
py::arg("other_tensor").noconvert(),
py::arg("input_a").noconvert(),
py::arg("input_b").noconvert(),
py::kw_only(),
py::arg("output_tensor").noconvert() = std::nullopt,
py::arg("transpose_input").noconvert() = false,
py::arg("transpose_other").noconvert() = false,
py::arg("transpose_input_a").noconvert() = false,
py::arg("transpose_input_b").noconvert() = false,
py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG,
"Performs a moreh_matmul operation.");

Expand All @@ -357,10 +357,10 @@ void py_module(py::module& m_primary) {
"moreh_matmul_backward",
&moreh_matmul_backward,
py::arg("output_grad").noconvert(),
py::arg("input").noconvert(),
py::arg("other").noconvert(),
py::arg("input_grad").noconvert() = std::nullopt,
py::arg("other_grad").noconvert() = std::nullopt,
py::arg("input_a").noconvert(),
py::arg("input_b").noconvert(),
py::arg("input_a_grad").noconvert() = std::nullopt,
py::arg("input_b_grad").noconvert() = std::nullopt,
py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG,
R"doc(
"Performs a moreh_matmul_backward operation.
Expand Down
6 changes: 3 additions & 3 deletions tt_eager/tt_lib/csrc/tt_lib_bindings_tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -335,14 +335,14 @@ void TensorModule(py::module &m_tensor) {

// moreh_linear
m_tensor.def("moreh_matmul", &moreh_matmul,
py::arg("input").noconvert(), py::arg("other").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
py::arg("input_a").noconvert(), py::arg("input_b").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
"Performs a moreh_linear operation.
.. csv-table::
:header: "Argument", "Description", "Data type", "Valid range", "Required"
"input", "Input tensor", "Tensor", "", "Yes"
"other", "Other tensor", "Tensor", "", "Yes"
"input_a", "First input tensor", "Tensor", "", "Yes"
"input_b", "Second input tensor", "Tensor", "", "Yes"
"output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
)doc");

Expand Down
40 changes: 20 additions & 20 deletions tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@ namespace tt::tt_metal::detail{
void TensorModuleBackwardOPs( py::module & m_tensor){

m_tensor.def("addalpha_bw", &tt::tt_metal::addalpha_bw,
py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("other").noconvert(), py::arg("alpha") = 1.0f, py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward operations for multiplication of ``other`` and ``alpha`` tensors with given ``grad``.
py::arg("grad").noconvert(), py::arg("input_a").noconvert(), py::arg("input_b").noconvert(), py::arg("alpha") = 1.0f, py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward operations for multiplication of ``input_b`` and ``alpha`` tensors with given ``grad``.
Input tensor must have BFLOAT16 data type.
Expand All @@ -21,8 +21,8 @@ namespace tt::tt_metal::detail{
:header: "Argument", "Description", "Data type", "Valid range", "Required"
"grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input", "Tensor addalpha is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"other", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input_a", "Tensor addalpha is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input_b", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"alpha", "Alpha value", "float", "default to 1.0f", "No"
"output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
)doc");
Expand Down Expand Up @@ -165,8 +165,8 @@ namespace tt::tt_metal::detail{
)doc");

m_tensor.def("add_bw", &tt::tt_metal::add_bw,
py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("other").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward operations for addition of ``other`` tensors with given ``grad``.
py::arg("grad").noconvert(), py::arg("input_a").noconvert(), py::arg("input_b").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward operations for addition of ``input_b`` tensors with given ``grad``.
Input tensor must have BFLOAT16 data type.
Expand All @@ -176,8 +176,8 @@ namespace tt::tt_metal::detail{
:header: "Argument", "Description", "Data type", "Valid range", "Required"
"grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input", "Tensor add is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"other", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input_a", "Tensor add is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input_b", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
)doc");

Expand Down Expand Up @@ -249,8 +249,8 @@ namespace tt::tt_metal::detail{
)doc");

m_tensor.def("div_bw", &tt::tt_metal::div_bw,
py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("other").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward operations for division of ``other`` with given ``grad``.
py::arg("grad").noconvert(), py::arg("input_a").noconvert(), py::arg("input_b").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward operations for division of ``input_b`` with given ``grad``.
Input tensor must have BFLOAT16 data type.
Expand All @@ -260,14 +260,14 @@ namespace tt::tt_metal::detail{
:header: "Argument", "Description", "Data type", "Valid range", "Required"
"grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input", "Tensor div is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"other", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input_a", "Tensor div is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input_b", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
)doc");

m_tensor.def("max_bw", &tt::tt_metal::max_bw,
py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("other").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward operations for maximum of ``other`` with given ``grad``.
py::arg("grad").noconvert(), py::arg("input_a").noconvert(), py::arg("input_b").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward operations for maximum of ``input_b`` with given ``grad``.
Input tensor must have BFLOAT16 data type.
Expand All @@ -277,15 +277,15 @@ namespace tt::tt_metal::detail{
:header: "Argument", "Description", "Data type", "Valid range", "Required"
"grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input", "Tensor max is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"other", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input_a", "Tensor max is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input_b", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
)doc");


m_tensor.def("where_bw", &tt::tt_metal::where_bw,
py::arg("grad").noconvert(), py::arg("condition").noconvert(), py::arg("input").noconvert(), py::arg("other").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward operations for where selected from either ``input`` or ``other``, depending on ``condition`` with given ``grad``.
py::arg("grad").noconvert(), py::arg("condition").noconvert(), py::arg("input_a").noconvert(), py::arg("input_b").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward operations for where selected from either ``input_a`` or ``input_b``, depending on ``condition`` with given ``grad``.
When condition True (nonzero), yield grad, otherwise yield zero's.
Input tensor must have BFLOAT16 data type.
Expand All @@ -297,8 +297,8 @@ namespace tt::tt_metal::detail{
"grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"condition", "Tensor", "Bool", "Tensor of shape [W, Z, Y, X]", "Yes"
"input", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"other", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input_a", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input_b", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
)doc");

Expand Down
Loading

0 comments on commit ff38868

Please sign in to comment.