#8709: ported creation operations to C++
arakhmati committed May 23, 2024
1 parent 392a717 commit 2632a5c
Showing 13 changed files with 609 additions and 487 deletions.
16 changes: 10 additions & 6 deletions tests/ttnn/unit_tests/gtests/test_add.cpp
@@ -3,11 +3,11 @@
// SPDX-License-Identifier: Apache-2.0

#include "tests/tt_metal/tt_metal/unit_tests_common/common/common_fixture.hpp"
#include "ttnn_test_fixtures.hpp"
#include "ttnn/device.hpp"
#include "ttnn/operations/binary.hpp"
#include "ttnn/operations/core.hpp"
#include "ttnn/operations/creation.hpp"
#include "ttnn_test_fixtures.hpp"

namespace ttnn {
namespace operations {
@@ -26,13 +26,17 @@ class Add1DTensorAndScalarFixture : public TTNNFixture,
TEST_P(Add1DTensorAndScalarFixture, AddsScalarCorrectly) {
auto param = GetParam();
const auto device_id = 0;
-auto &device = ttnn::open_device(device_id);
+auto& device = ttnn::open_device(device_id);
std::array<uint32_t, 2> dimensions = {param.h, param.w};
ttnn::Shape shape(dimensions);
-const auto input_tensor = ttnn::zeros(shape, ttnn::bfloat16, ttnn::TILE_LAYOUT, device);
-const auto output_tensor = input_tensor + param.scalar;
-const auto expected_tensor = ttnn::full(shape, param.scalar, ttnn::bfloat16, ttnn::TILE_LAYOUT, device);
-TT_FATAL(tt::numpy::allclose<::bfloat16>(ttnn::from_device(expected_tensor), ttnn::from_device(output_tensor)));
+
+{
+const auto input_tensor = ttnn::zeros(shape, ttnn::bfloat16, ttnn::TILE_LAYOUT, device, std::nullopt);
+const auto output_tensor = input_tensor + param.scalar;
+const auto expected_tensor = ttnn::operations::creation::full(
+shape, param.scalar, ttnn::bfloat16, ttnn::TILE_LAYOUT, device, std::nullopt);
+TT_FATAL(tt::numpy::allclose<::bfloat16>(ttnn::from_device(expected_tensor), ttnn::from_device(output_tensor)));
+}
ttnn::close_device(device);
}

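Note: the trailing std::nullopt added in the test above is an optional memory config. A minimal sketch of passing one explicitly, as an illustration only (not part of this diff); it reuses ttnn::DRAM_MEMORY_CONFIG, which appears later in this commit:

// Hedged sketch: the same zeros call as in the test, with an explicit
// memory config in place of std::nullopt.
const auto input_tensor = ttnn::zeros(
    shape, ttnn::bfloat16, ttnn::TILE_LAYOUT, device, ttnn::DRAM_MEMORY_CONFIG);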
2 changes: 1 addition & 1 deletion tests/ttnn/unit_tests/module.mk
@@ -2,7 +2,7 @@

TTNN_UNIT_TESTS_HOME_DIR = $(TT_METAL_HOME)/tests/ttnn/unit_tests

-TTNN_UNIT_TESTS_DIRS := $(TTNN_UNIT_TESTS_HOME_DIR) $(TTNN_UNIT_TESTS_HOME_DIR)/operations
+TTNN_UNIT_TESTS_DIRS := $(TTNN_UNIT_TESTS_HOME_DIR) $(TTNN_UNIT_TESTS_HOME_DIR)/gtests

TTNN_UNIT_TESTS_SRCS := $(foreach dir,$(TTNN_UNIT_TESTS_DIRS),$(wildcard $(dir)/*.cpp))

211 changes: 101 additions & 110 deletions tt_eager/tt_numpy/functions.hpp

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions ttnn/cpp/pybind11/operations/__init__.hpp
@@ -10,6 +10,7 @@
#include "ccl.hpp"
#include "core.hpp"
#include "conv2d.hpp"
#include "creation.hpp"
#include "data_movement.hpp"
#include "embedding.hpp"
#include "kv_cache.hpp"
@@ -38,6 +39,9 @@ void py_module(py::module& module) {
auto m_core = module.def_submodule("core", "core operations");
core::py_module(m_core);

+auto m_creation = module.def_submodule("creation", "creation operations");
+creation::py_module(m_creation);

auto m_embedding = module.def_submodule("embedding", "embedding operations");
embedding::py_module(m_embedding);

30 changes: 15 additions & 15 deletions ttnn/cpp/pybind11/operations/binary.hpp
@@ -20,7 +20,7 @@ namespace binary {
namespace detail {

template <typename binary_operation_t>
-void bind_binary(py::module& module, const binary_operation_t& operation, const std::string& description) {
+void bind_binary_operation(py::module& module, const binary_operation_t& operation, const std::string& description) {
auto doc = fmt::format(
R"doc({0}(input_tensor_a: ttnn.Tensor, input_tensor_b: Union[ttnn.Tensor, int, float], *, memory_config: Optional[ttnn.MemoryConfig] = None, dtype: Optional[ttnn.DataType] = None, activations: Optional[List[str]] = None) -> ttnn.Tensor
@@ -84,86 +84,86 @@ void bind_binary(py::module& module, const binary_operation_t& operation, const
} // namespace detail

void py_module(py::module& module) {
-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::add,
R"doc(Adds :attr:`input_tensor_a` to :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`
.. math:: \mathrm{{ input\_tensor\_a }}_i + \mathrm{{ input\_tensor\_b }}_i)doc");

-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::add_,
R"doc(Adds :attr:`input_tensor_a` to :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a` in-place
.. math:: \mathrm{{input\_tensor\_a}}_i + \mathrm{{input\_tensor\_b}}_i)doc");

-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::subtract,
R"doc(Subtracts :attr:`input_tensor_b` from :attr:`input_tensor_a` and returns the tensor with the same layout as :attr:`input_tensor_a`
.. math:: \mathrm{{ input\_tensor\_a }}_i - \mathrm{{ input\_tensor\_b }}_i)doc");

-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::subtract_,
R"doc(Subtracts :attr:`input_tensor_b` from :attr:`input_tensor_a` and returns the tensor with the same layout as :attr:`input_tensor_a` in-place
.. math:: \mathrm{{input\_tensor\_a}}_i - \mathrm{{input\_tensor\_b}}_i)doc");

-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::multiply,
R"doc(Multiplies :attr:`input_tensor_a` by :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`
.. math:: \mathrm{{ input\_tensor\_a }}_i \times \mathrm{{ input\_tensor\_b }}_i)doc");

-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::multiply_,
R"doc(Multiplies :attr:`input_tensor_a` by :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a` in-place
.. math:: \mathrm{{input\_tensor\_a}}_i \times \mathrm{{input\_tensor\_b}}_i)doc");

-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::eq,
R"doc(Compares if :attr:`input_tensor_a` is equal to :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`
.. math:: \mathrm{{input\_tensor\_a}}_i == \mathrm{{input\_tensor\_b}}_i)doc");

-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::ne,
R"doc(Compares if :attr:`input_tensor_a` is not equal to :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`
.. math:: \mathrm{{input\_tensor\_a}}_i != \mathrm{{input\_tensor\_b}}_i)doc");

-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::lt,
R"doc(Compares if :attr:`input_tensor_a` is less than :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`
.. math:: \mathrm{{input\_tensor\_a}}_i < \mathrm{{input\_tensor\_b}}_i)doc");

-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::le,
R"doc(MCompares if :attr:`input_tensor_a` is less than or equal to :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`
.. math:: \mathrm{{input\_tensor\_a}}_i <= \mathrm{{input\_tensor\_b}}_i)doc");

-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::gt,
R"doc(Compares if :attr:`input_tensor_a` is greater than :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`
.. math:: \mathrm{{input\_tensor\_a}}_i > \mathrm{{input\_tensor\_b}}_i)doc");

-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::ge,
R"doc(Compares if :attr:`input_tensor_a` is greater than or equal to :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`
.. math:: \mathrm{{input\_tensor\_a}}_i >= \mathrm{{input\_tensor\_b}}_i)doc");

-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::logical_and,
R"doc(Compute logical AND of :attr:`input_tensor_a` and :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`
.. math:: \mathrm{{input\_tensor\_a}}_i && \mathrm{{input\_tensor\_b}}_i)doc");

-detail::bind_binary(
+detail::bind_binary_operation(
module,
ttnn::logical_or,
R"doc(Compute logical OR of :attr:`input_tensor_a` and :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`
217 changes: 217 additions & 0 deletions ttnn/cpp/pybind11/operations/creation.hpp
@@ -0,0 +1,217 @@
// SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.
//
// SPDX-License-Identifier: Apache-2.0

#pragma once

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

#include "ttnn/cpp/pybind11/decorators.hpp"
#include "ttnn/operations/creation.hpp"

namespace py = pybind11;

namespace ttnn {
namespace operations {
namespace creation {

namespace detail {

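// Binds a full-style creation operation. Two pybind overloads are registered
// so Python callers can pass fill_value as either a float or an int; both wrap
// the shape vector into ttnn::Shape and forward to the same registered operation.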
template <typename creation_operation_t>
void bind_full_operation(py::module& module, const creation_operation_t& operation) {
auto doc = fmt::format(
R"doc({0}(shape: ttnn.Shape, fill_value: Union[int, float], dtype: Optional[ttnn.DataType] = None, layout: Optional[ttnn.Layout] = None, device: Optional[ttnn.Device] = None, memory_config: Optional[ttnn.MemoryConfig] = None)doc",
operation.name());

bind_registered_operation(
module,
operation,
doc,
ttnn::pybind_overload_t{
[](const creation_operation_t& self,
const std::vector<uint32_t>& shape,
const float fill_value,
const std::optional<DataType>& dtype,
const std::optional<Layout>& layout,
const std::optional<std::reference_wrapper<Device>>& device,
const std::optional<MemoryConfig>& memory_config) -> ttnn::Tensor {
return self(ttnn::Shape{tt::tt_metal::Shape{shape}}, fill_value, dtype, layout, device, memory_config);
},
py::arg("shape"),
py::arg("fill_value"),
py::arg("dtype") = std::nullopt,
py::arg("layout") = std::nullopt,
py::arg("device") = std::nullopt,
py::arg("memory_config") = std::nullopt},
ttnn::pybind_overload_t{
[](const creation_operation_t& self,
const std::vector<uint32_t>& shape,
const int fill_value,
const std::optional<DataType>& dtype,
const std::optional<Layout>& layout,
const std::optional<std::reference_wrapper<Device>>& device,
const std::optional<MemoryConfig>& memory_config) -> ttnn::Tensor {
return self(ttnn::Shape{tt::tt_metal::Shape{shape}}, fill_value, dtype, layout, device, memory_config);
},
py::arg("shape"),
py::arg("fill_value"),
py::arg("dtype") = std::nullopt,
py::arg("layout") = std::nullopt,
py::arg("device") = std::nullopt,
py::arg("memory_config") = std::nullopt});
}

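// Variant for operations whose fill value is baked in (used below for zeros,
// ones, and empty): no fill_value argument is exposed, and shape, dtype,
// layout, device, and memory_config are forwarded straight through.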
template <typename creation_operation_t>
void bind_full_operation_with_hard_coded_value(py::module& module, const creation_operation_t& operation) {
auto doc = fmt::format(
R"doc({0}(shape: ttnn.Shape, dtype: Optional[ttnn.DataType] = None, layout: Optional[ttnn.Layout] = None, device: Optional[ttnn.Device] = None, memory_config: Optional[ttnn.MemoryConfig] = None)doc",
operation.name());

bind_registered_operation(
module,
operation,
doc,
ttnn::pybind_overload_t{
[](const creation_operation_t& self,
const std::vector<uint32_t>& shape,
const std::optional<DataType>& dtype,
const std::optional<Layout>& layout,
const std::optional<std::reference_wrapper<Device>>& device,
const std::optional<MemoryConfig>& memory_config) -> ttnn::Tensor {
return self(ttnn::Shape{tt::tt_metal::Shape{shape}}, dtype, layout, device, memory_config);
},
py::arg("shape"),
py::arg("dtype") = std::nullopt,
py::arg("layout") = std::nullopt,
py::arg("device") = std::nullopt,
py::arg("memory_config") = std::nullopt});
}

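// Like-variant: takes a reference tensor instead of a shape and, as above,
// registers separate float and int overloads for fill_value.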
template <typename creation_operation_t>
void bind_full_like_operation(py::module& module, const creation_operation_t& operation) {
auto doc = fmt::format(
R"doc({0}(tensor: ttnn.Tensor, fill_value: Union[int, float], dtype: Optional[ttnn.DataType] = None, layout: Optional[ttnn.Layout] = None, device: Optional[ttnn.Device] = None, memory_config: Optional[ttnn.MemoryConfig] = None)doc",
operation.name());

bind_registered_operation(
module,
operation,
doc,
ttnn::pybind_overload_t{
[](const creation_operation_t& self,
const ttnn::Tensor& tensor,
const float fill_value,
const std::optional<DataType>& dtype,
const std::optional<Layout>& layout,
const std::optional<std::reference_wrapper<Device>>& device,
const std::optional<MemoryConfig>& memory_config) -> ttnn::Tensor {
return self(tensor, fill_value, dtype, layout, device, memory_config);
},
py::arg("tensor"),
py::arg("fill_value"),
py::arg("dtype") = std::nullopt,
py::arg("layout") = std::nullopt,
py::arg("device") = std::nullopt,
py::arg("memory_config") = std::nullopt},
ttnn::pybind_overload_t{
[](const creation_operation_t& self,
const ttnn::Tensor& tensor,
const int fill_value,
const std::optional<DataType>& dtype,
const std::optional<Layout>& layout,
const std::optional<std::reference_wrapper<Device>>& device,
const std::optional<MemoryConfig>& memory_config) -> ttnn::Tensor {
return self(tensor, fill_value, dtype, layout, device, memory_config);
},
py::arg("tensor"),
py::arg("fill_value"),
py::arg("dtype") = std::nullopt,
py::arg("layout") = std::nullopt,
py::arg("device") = std::nullopt,
py::arg("memory_config") = std::nullopt});
}

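// Like-variant with a hard-coded fill value; used below for zeros_like,
// ones_like, and empty_like.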
template <typename creation_operation_t>
void bind_full_like_operation_with_hard_coded_value(py::module& module, const creation_operation_t& operation) {
auto doc = fmt::format(
R"doc({0}(tensor: ttnn.Tensor, dtype: Optional[ttnn.DataType] = None, layout: Optional[ttnn.Layout] = None, device: Optional[ttnn.Device] = None, memory_config: Optional[ttnn.MemoryConfig] = None)doc",
operation.name());

bind_registered_operation(
module,
operation,
doc,
ttnn::pybind_overload_t{
[](const creation_operation_t& self,
const ttnn::Tensor& tensor,
const std::optional<DataType>& dtype,
const std::optional<Layout>& layout,
const std::optional<std::reference_wrapper<Device>>& device,
const std::optional<MemoryConfig>& memory_config) -> ttnn::Tensor {
return self(tensor, dtype, layout, device, memory_config);
},
py::arg("tensor"),
py::arg("dtype") = std::nullopt,
py::arg("layout") = std::nullopt,
py::arg("device") = std::nullopt,
py::arg("memory_config") = std::nullopt});
}

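// Binds arange with two overloads: arange(stop) and arange(start, stop, step).
// Defaults are dtype = ttnn::bfloat16 and the DRAM memory config.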
template <typename creation_operation_t>
void bind_arange_operation(py::module& module, const creation_operation_t& operation) {
auto doc = fmt::format(
R"doc({0}(start: int = 0, stop: int, step: int = 1, dtype: ttnn.DataType = ttnn.bfloat16, device: ttnn.Device = None, memory_config: ttnn.MemoryConfig = ttnn.DRAM_MEMORY_CONFIG)doc",
operation.name());

bind_registered_operation(
module,
operation,
doc,
ttnn::pybind_overload_t{
[](const creation_operation_t& self,
const int64_t stop,
const DataType dtype,
const std::optional<std::reference_wrapper<Device>>& device,
const MemoryConfig& memory_config) -> ttnn::Tensor { return self(stop, dtype, device, memory_config); },
py::arg("stop"),
py::arg("dtype") = ttnn::bfloat16,
py::arg("device") = std::nullopt,
py::arg("memory_config") = ttnn::DRAM_MEMORY_CONFIG} // namespace detail
,
ttnn::pybind_overload_t{
[](const creation_operation_t& self,
const int64_t start,
const int64_t stop,
const int64_t step,
const DataType dtype,
const std::optional<std::reference_wrapper<Device>>& device,
const MemoryConfig& memory_config) -> ttnn::Tensor {
return self(start, stop, step, dtype, device, memory_config);
},
py::arg("start"),
py::arg("stop"),
py::arg("step") = 1,
py::arg("dtype") = ttnn::bfloat16,
py::arg("device") = std::nullopt,
py::arg("memory_config") = ttnn::DRAM_MEMORY_CONFIG});
}
} // namespace detail

void py_module(py::module& module) {
detail::bind_full_operation(module, ttnn::full);
detail::bind_full_operation_with_hard_coded_value(module, ttnn::zeros);
detail::bind_full_operation_with_hard_coded_value(module, ttnn::ones);
detail::bind_full_operation_with_hard_coded_value(module, ttnn::empty);

detail::bind_full_like_operation(module, ttnn::full_like);
detail::bind_full_like_operation_with_hard_coded_value(module, ttnn::zeros_like);
detail::bind_full_like_operation_with_hard_coded_value(module, ttnn::ones_like);
detail::bind_full_like_operation_with_hard_coded_value(module, ttnn::empty_like);

detail::bind_arange_operation(module, ttnn::arange);
}

} // namespace creation
} // namespace operations
} // namespace ttnn
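For reference, a minimal C++ usage sketch of the arange overloads registered above. This is an assumption based on the lambda signatures and the device handling in test_add.cpp, not code from this commit:

// Hedged sketch: ttnn::arange called with the argument order the overloads forward.
auto& device = ttnn::open_device(0);
const auto t0 = ttnn::arange(10, ttnn::bfloat16, device, ttnn::DRAM_MEMORY_CONFIG);        // arange(stop)
const auto t1 = ttnn::arange(2, 10, 2, ttnn::bfloat16, device, ttnn::DRAM_MEMORY_CONFIG);  // arange(start, stop, step)
ttnn::close_device(device);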
