diff --git a/ttnn/cpp/ttnn/operations/data_movement/pad/pad_pybind.hpp b/ttnn/cpp/ttnn/operations/data_movement/pad/pad_pybind.hpp
index 3effaaeac67..9f30744c289 100644
--- a/ttnn/cpp/ttnn/operations/data_movement/pad/pad_pybind.hpp
+++ b/ttnn/cpp/ttnn/operations/data_movement/pad/pad_pybind.hpp
@@ -82,7 +82,7 @@ void bind_pad(py::module& module) {
             py::arg("input_tensor_start"),
             py::arg("value"),
             py::kw_only(),
-            py::arg("use_multicore") = false,
+            py::arg("use_multicore") = true,
             py::arg("memory_config") = std::nullopt,
             py::arg("queue_id") = 0,
         },
@@ -102,7 +102,7 @@ void bind_pad(py::module& module) {
             py::arg("input_tensor_start"),
             py::arg("value"),
             py::kw_only(),
-            py::arg("use_multicore") = false,
+            py::arg("use_multicore") = true,
             py::arg("memory_config") = std::nullopt,
             py::arg("queue_id") = 0,
         },
@@ -122,7 +122,7 @@ void bind_pad(py::module& module) {
             py::arg("input_tensor_start"),
             py::arg("value"),
             py::kw_only(),
-            py::arg("use_multicore") = false,
+            py::arg("use_multicore") = true,
             py::arg("memory_config") = std::nullopt,
             py::arg("queue_id") = 0,
         },
@@ -142,7 +142,7 @@ void bind_pad(py::module& module) {
             py::arg("input_tensor_start"),
             py::arg("value"),
             py::kw_only(),
-            py::arg("use_multicore") = false,
+            py::arg("use_multicore") = true,
             py::arg("memory_config") = std::nullopt,
             py::arg("queue_id") = 0,
         },
@@ -162,7 +162,7 @@ void bind_pad(py::module& module) {
             py::arg("input_tensor_start"),
             py::arg("value"),
             py::kw_only(),
-            py::arg("use_multicore") = false,
+            py::arg("use_multicore") = true,
             py::arg("memory_config") = std::nullopt,
             py::arg("queue_id") = 0,
         },
@@ -182,7 +182,7 @@ void bind_pad(py::module& module) {
             py::arg("input_tensor_start"),
             py::arg("value"),
             py::kw_only(),
-            py::arg("use_multicore") = false,
+            py::arg("use_multicore") = true,
             py::arg("memory_config") = std::nullopt,
             py::arg("queue_id") = 0,
         },
@@ -202,7 +202,7 @@ void bind_pad(py::module& module) {
             py::arg("input_tensor_start"),
             py::arg("value"),
             py::kw_only(),
-            py::arg("use_multicore") = false,
+            py::arg("use_multicore") = true,
             py::arg("memory_config") = std::nullopt,
             py::arg("queue_id") = 0,
         },
@@ -222,7 +222,7 @@ void bind_pad(py::module& module) {
             py::arg("input_tensor_start"),
             py::arg("value"),
             py::kw_only(),
-            py::arg("use_multicore") = false,
+            py::arg("use_multicore") = true,
             py::arg("memory_config") = std::nullopt,
             py::arg("queue_id") = 0,
         }
diff --git a/ttnn/cpp/ttnn/operations/data_movement/tilize/tilize.hpp b/ttnn/cpp/ttnn/operations/data_movement/tilize/tilize.hpp
index d609fafde27..95715e82164 100644
--- a/ttnn/cpp/ttnn/operations/data_movement/tilize/tilize.hpp
+++ b/ttnn/cpp/ttnn/operations/data_movement/tilize/tilize.hpp
@@ -15,13 +15,13 @@ struct ExecuteTilize {
         const ttnn::Tensor &input_tensor,
         const std::optional<MemoryConfig> &memory_config = std::nullopt,
         std::optional<DataType> output_dtype = std::nullopt,
-        bool use_multicore = false);
+        bool use_multicore = true);
 
     static ttnn::Tensor invoke(
         const ttnn::Tensor &input_tensor,
         const std::optional<MemoryConfig> &memory_config = std::nullopt,
         std::optional<DataType> output_dtype = std::nullopt,
-        bool use_multicore = false);
+        bool use_multicore = true);
 };
 
 } // namespace operations::data_movement
diff --git a/ttnn/cpp/ttnn/operations/data_movement/tilize_with_val_padding/tilize_with_val_padding.hpp b/ttnn/cpp/ttnn/operations/data_movement/tilize_with_val_padding/tilize_with_val_padding.hpp
index b05139c3708..390518b13a2 100644
--- a/ttnn/cpp/ttnn/operations/data_movement/tilize_with_val_padding/tilize_with_val_padding.hpp
+++ b/ttnn/cpp/ttnn/operations/data_movement/tilize_with_val_padding/tilize_with_val_padding.hpp
@@ -22,7 +22,7 @@ struct ExecuteTilizeWithValPadding {
         const PadValue pad_value,
         const std::optional<MemoryConfig> &memory_config = std::nullopt,
         std::optional<DataType> output_dtype = std::nullopt,
-        bool use_multicore = false);
+        bool use_multicore = true);
 
     static ttnn::Tensor invoke(
         const ttnn::Tensor &input_tensor,
@@ -30,7 +30,7 @@ struct ExecuteTilizeWithValPadding {
         const PadValue pad_value,
         const std::optional<MemoryConfig> &memory_config = std::nullopt,
         std::optional<DataType> output_dtype = std::nullopt,
-        bool use_multicore = false);
+        bool use_multicore = true);
 };
@@ -41,13 +41,13 @@ struct ExecuteTilizeWithZeroPadding {
         const ttnn::Tensor &input_tensor,
         const std::optional<MemoryConfig> &memory_config = std::nullopt,
         std::optional<DataType> output_dtype = std::nullopt,
-        bool use_multicore = false);
+        bool use_multicore = true);
 
     static ttnn::Tensor invoke(
         const ttnn::Tensor &input_tensor,
         const std::optional<MemoryConfig> &memory_config = std::nullopt,
         std::optional<DataType> output_dtype = std::nullopt,
-        bool use_multicore = false);
+        bool use_multicore = true);
 };
 
 } // namespace operations::data_movement
diff --git a/ttnn/cpp/ttnn/operations/data_movement/tilize_with_val_padding/tilize_with_val_padding_pybind.hpp b/ttnn/cpp/ttnn/operations/data_movement/tilize_with_val_padding/tilize_with_val_padding_pybind.hpp
index fd7c84c7d3e..66c39137725 100644
--- a/ttnn/cpp/ttnn/operations/data_movement/tilize_with_val_padding/tilize_with_val_padding_pybind.hpp
+++ b/ttnn/cpp/ttnn/operations/data_movement/tilize_with_val_padding/tilize_with_val_padding_pybind.hpp
@@ -61,7 +61,7 @@ void bind_tilize_with_val_padding(py::module &module) {
             py::kw_only(),
             py::arg("memory_config") = std::nullopt,
             py::arg("dtype") = std::nullopt,
-            py::arg("use_multicore") = false,
+            py::arg("use_multicore") = true,
             py::arg("queue_id") = 0,
         }
@@ -105,7 +105,7 @@ void bind_tilize_with_zero_padding(py::module &module) {
             py::kw_only(),
             py::arg("memory_config") = std::nullopt,
             py::arg("output_dtype") = std::nullopt,
-            py::arg("use_multicore") = false,
+            py::arg("use_multicore") = true,
             py::arg("queue_id") = 0,
         });
 }
diff --git a/ttnn/cpp/ttnn/operations/data_movement/untilize_with_unpadding/untilize_with_unpadding.hpp b/ttnn/cpp/ttnn/operations/data_movement/untilize_with_unpadding/untilize_with_unpadding.hpp
index 7fb555eee7d..a3e7fa17487 100644
--- a/ttnn/cpp/ttnn/operations/data_movement/untilize_with_unpadding/untilize_with_unpadding.hpp
+++ b/ttnn/cpp/ttnn/operations/data_movement/untilize_with_unpadding/untilize_with_unpadding.hpp
@@ -15,14 +15,14 @@ struct ExecuteUntilizeWithUnpadding {
         const ttnn::Tensor &input_tensor,
         const tt::tt_metal::LegacyShape &output_tensor_end,
         const std::optional<MemoryConfig> &memory_config,
-        bool use_multicore = false,
+        bool use_multicore = true,
         bool use_pack_untilize = true);
 
     static ttnn::Tensor invoke(
         const ttnn::Tensor &input_tensor,
         const tt::tt_metal::LegacyShape &output_tensor_end,
         const std::optional<MemoryConfig> &memory_config,
-        bool use_multicore = false,
+        bool use_multicore = true,
         bool use_pack_untilize = true);
 };
diff --git a/ttnn/cpp/ttnn/operations/data_movement/untilize_with_unpadding/untilize_with_unpadding_pybind.hpp b/ttnn/cpp/ttnn/operations/data_movement/untilize_with_unpadding/untilize_with_unpadding_pybind.hpp
index 82d25ad257a..6a128d5a347 100644
--- a/ttnn/cpp/ttnn/operations/data_movement/untilize_with_unpadding/untilize_with_unpadding_pybind.hpp
+++ b/ttnn/cpp/ttnn/operations/data_movement/untilize_with_unpadding/untilize_with_unpadding_pybind.hpp
@@ -55,7 +55,7 @@ void bind_untilize_with_unpadding(py::module &module) {
             py::arg("output_tensor_end"),
             py::kw_only(),
             py::arg("memory_config") = std::nullopt,
-            py::arg("use_multicore") = false,
+            py::arg("use_multicore") = true,
             py::arg("use_pack_untilize") = true,
             py::arg("queue_id") = 0,
         });
diff --git a/ttnn/cpp/ttnn/operations/reduction/argmax/argmax_pybind.hpp b/ttnn/cpp/ttnn/operations/reduction/argmax/argmax_pybind.hpp
index 98ff4040535..f9826c96b92 100644
--- a/ttnn/cpp/ttnn/operations/reduction/argmax/argmax_pybind.hpp
+++ b/ttnn/cpp/ttnn/operations/reduction/argmax/argmax_pybind.hpp
@@ -64,7 +64,7 @@ void bind_reduction_argmax_operation(py::module& module) {
             py::arg("input_tensor").noconvert(),
             py::kw_only(),
             py::arg("dim") = std::nullopt,
-            py::arg("use_multicore") = false,
+            py::arg("use_multicore") = true,
             py::arg("memory_config") = std::nullopt,
             py::arg("output_tensor") = std::nullopt,
             py::arg("queue_id") = 0});
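
Note on the user-facing effect: every binding touched above keeps use_multicore as an optional keyword argument, so callers that depended on the previous single-core behaviour can restore it by passing use_multicore=False explicitly. Below is a minimal sketch from the Python side, assuming the standard ttnn bindings built from this tree; the device id, tensor shape, and layout are illustrative, and ttnn.argmax is shown only because its full binding appears in the diff.

    import torch
    import ttnn

    # Illustrative device id; any available device works.
    device = ttnn.open_device(device_id=0)

    torch_input = torch.rand(1, 1, 32, 64, dtype=torch.bfloat16)
    input_tensor = ttnn.from_torch(torch_input, layout=ttnn.ROW_MAJOR_LAYOUT, device=device)

    # With this change the multi-core path is taken by default.
    indices = ttnn.argmax(input_tensor)

    # Opting out recovers the previous single-core default.
    indices_single_core = ttnn.argmax(input_tensor, use_multicore=False)

    ttnn.close_device(device)

The same opt-out applies to ttnn.pad, ttnn.tilize, ttnn.tilize_with_val_padding, ttnn.tilize_with_zero_padding, and ttnn.untilize_with_unpadding, whose bindings and C++ defaults are flipped in the same way above.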