Skip to content

Commit

Permalink
[et][dim order] dim order variant empty operator
Browse files Browse the repository at this point in the history
Pull Request resolved: #7154

ghstack-source-id: 256177391
@exported-using-ghexport


This diff introduces the dim order variant of the empty operator, to replace the original empty operator when using dim order in ExecuTorch.

Differential Revision: [D66683250](https://our.internmc.facebook.com/intern/diff/D66683250/)
  • Loading branch information
Gasoonjia committed Dec 4, 2024
1 parent 5f0a14a commit c0de556
Show file tree
Hide file tree
Showing 8 changed files with 373 additions and 0 deletions.
110 changes: 110 additions & 0 deletions kernels/aten/cpu/op__empty_dim_order.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/

#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

#include <cstdint>
#include <cstring>

namespace torch {
namespace executor {
namespace native {

using exec_aten::IntArrayRef;
using exec_aten::Tensor;
using OptionalIntArrayRef = exec_aten::OptionalArrayRef<int64_t>;
using DimOrderArrayRef = exec_aten::ArrayRef<executorch::aten::DimOrderType>;

// Maximum tensor rank supported when materializing strides from a dim order
// (sizes the stack buffer used in _check__empty_out_dim_order below).
// `constexpr` rather than `const`: this is a compile-time array bound.
constexpr size_t kMaxNumOfDimensions = 16;

namespace {

/*
 * Validates `dim_order` against `out` for the ATen-mode empty_dim_order op.
 *
 * Checks that (1) the effective dim order has one entry per dimension of
 * `out`, (2) it is either contiguous or channels-last, and (3) `out`'s
 * existing strides already match the strides implied by that dim order
 * (ATen tensors carry strides, so the out tensor must agree up front).
 *
 * Returns true when all checks pass; logs and returns false otherwise.
 */
inline bool _check__empty_out_dim_order(
    OptionalIntArrayRef dim_order,
    Tensor& out) {
  exec_aten::ArrayRef<int64_t> dim_order_ref;
  std::vector<int64_t> dim_order_vec;

  if (dim_order.has_value()) {
    // out tensor's dim order shall equal the input dim order.
    dim_order_ref = exec_aten::ArrayRef<int64_t>(
        dim_order.value().data(), dim_order.value().size());
  } else { // dim_order is not set; out tensor should be contiguous dim order.
    // int64_t (not int) to match out.dim()'s type and avoid narrowing.
    for (int64_t i = 0; i < out.dim(); i++) {
      dim_order_vec.push_back(i);
    }
    dim_order_ref = exec_aten::ArrayRef<int64_t>(dim_order_vec);
  }

  // Dim order size shall equal the input dim. Cast to keep the comparison
  // signed/unsigned consistent (size() is size_t, dim() is int64_t).
  ET_LOG_AND_RETURN_IF_FALSE(
      static_cast<int64_t>(dim_order_ref.size()) == out.dim());

  // Only contiguous or channels-last dim orders are supported.
  ET_LOG_AND_RETURN_IF_FALSE(
      is_channels_last_dim_order(dim_order_ref.data(), dim_order_ref.size()) ||
      is_contiguous_dim_order(dim_order_ref.data(), dim_order_ref.size()));

  // Guard the fixed-size stride buffer below.
  ET_LOG_AND_RETURN_IF_FALSE(
      static_cast<int64_t>(kMaxNumOfDimensions) >= out.dim());
  exec_aten::StridesType target_strides[kMaxNumOfDimensions];
  dim_order_to_stride_nocheck(
      out.sizes().data(),
      dim_order_ref.data(),
      dim_order_ref.size(),
      target_strides);

  // The out ATen tensor shall have the same memory-format strides as the
  // requested dim order.
  for (size_t i = 0; i < dim_order_ref.size(); i++) {
    ET_LOG_AND_RETURN_IF_FALSE(target_strides[i] == out.strides()[i]);
  }

  return true;
}

} // namespace

/*
 * Empty out tensor with specified dim order
 *
 * _empty_dim_order.out(SymInt[] size, *, int[]? dim_order=None, Tensor(a!) out)
 * -> Tensor(a!)
 *
 * @param context Kernel runtime context used for failure reporting.
 * @param size Requested tensor sizes.
 * @param dim_order Optional dim order; when unset, contiguous is assumed.
 * @param out Pre-allocated output tensor; returned on success or failure.
 */
Tensor& _empty_dim_order_out(
    KernelRuntimeContext& context,
    IntArrayRef size,
    OptionalIntArrayRef dim_order,
    Tensor& out) {
  // Note: the original `(void)context;` silencer was removed — `context`
  // is genuinely used by both kernel-check macros below.

  // Check if dim_order is valid and consistent with out's strides.
  ET_KERNEL_CHECK(
      context,
      _check__empty_out_dim_order(dim_order, out),
      InvalidArgument,
      out);

  // Resize for dynamic shape.
  ET_KERNEL_CHECK_MSG(
      context,
      resize_tensor(out, size) == Error::Ok,
      InvalidArgument,
      out,
      "Failed to resize output tensor.");

  return out;
}

/*
 * Context-free convenience overload: builds a default runtime context and
 * forwards to the context-taking variant above.
 */
Tensor& _empty_dim_order_out(
    IntArrayRef size,
    OptionalIntArrayRef dim_order,
    Tensor& out) {
  executorch::runtime::KernelRuntimeContext default_context{};
  return _empty_dim_order_out(default_context, size, dim_order, out);
}

} // namespace native
} // namespace executor
} // namespace torch
3 changes: 3 additions & 0 deletions kernels/aten/cpu/targets.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,9 @@ load("@fbsource//xplat/executorch/kernels/portable:op_registration_util.bzl", "d
# ops, and must be split. They can, however, share common code via a library dep
# if necessary.
_EDGE_DIALECT_OPS = (
op_target(
name = "op__empty_dim_order",
),
op_target(
name = "op__to_dim_order_copy",
deps = [
Expand Down
5 changes: 5 additions & 0 deletions kernels/aten/edge_dialect_aten_op.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,11 @@
#
# This yaml file contains operators that are defined by ExecuTorch and used in ATen mode.

# Dim-order variant of empty.out; ATen-mode kernel lives in
# kernels/aten/cpu/op__empty_dim_order.cpp.
- func: dim_order_ops::_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_empty_dim_order_out

- func: dim_order_ops::_to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
kernels:
- arg_meta: null
Expand Down
82 changes: 82 additions & 0 deletions kernels/portable/cpu/op__empty_dim_order.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/

#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

#include <cstdint>
#include <cstring>

namespace torch {
namespace executor {
namespace native {

using exec_aten::Tensor;
using OptionalIntArrayRef = exec_aten::OptionalArrayRef<int64_t>;
using DimOrderArrayRef = exec_aten::ArrayRef<executorch::aten::DimOrderType>;

namespace {

/*
 * Validates `dim_order` against `out` for the portable empty_dim_order op.
 *
 * When `dim_order` is provided, it must be contiguous or channels-last and
 * must match out's existing dim order element-wise. When absent, out must
 * already be contiguous.
 *
 * Returns true when all checks pass; logs and returns false otherwise.
 */
bool _check__empty_out_dim_order(OptionalIntArrayRef dim_order, Tensor& out) {
  DimOrderArrayRef out_dim_order = out.dim_order();

  if (dim_order.has_value()) {
    // Bind once and use it consistently instead of re-evaluating
    // dim_order.value() in each check below.
    IntArrayRef dim_order_ref = dim_order.value();

    // Only contiguous or channels-last dim orders are supported.
    ET_LOG_AND_RETURN_IF_FALSE(
        is_channels_last_dim_order(
            dim_order_ref.data(), dim_order_ref.size()) ||
        is_contiguous_dim_order(dim_order_ref.data(), dim_order_ref.size()));

    // Out tensor shall have the same dim order as dim_order.
    ET_LOG_AND_RETURN_IF_FALSE(out_dim_order.size() == dim_order_ref.size());
    for (size_t i = 0; i < dim_order_ref.size(); i++) {
      ET_LOG_AND_RETURN_IF_FALSE(out_dim_order[i] == dim_order_ref[i]);
    }
  } else { // dim_order is not set; out tensor should be contiguous memory
           // format.
    ET_LOG_AND_RETURN_IF_FALSE(
        is_contiguous_dim_order(out_dim_order.data(), out_dim_order.size()));
  }
  return true;
}

} // namespace

/*
 * Empty out tensor with specified dim order
 *
 * _empty_dim_order.out(SymInt[] size, *, int[]? dim_order=None, Tensor(a!) out)
 * -> Tensor(a!)
 *
 * @param context Kernel runtime context used for failure reporting.
 * @param size Requested tensor sizes.
 * @param dim_order Optional dim order; when unset, out must be contiguous.
 * @param out Pre-allocated output tensor; returned on success or failure.
 */
Tensor& _empty_dim_order_out(
    KernelRuntimeContext& context,
    IntArrayRef size,
    OptionalIntArrayRef dim_order,
    Tensor& out) {
  // BUG FIX: the validation result was previously discarded, so an invalid
  // dim_order never failed the kernel. Wrap it in ET_KERNEL_CHECK, matching
  // the ATen-mode implementation of this operator.
  ET_KERNEL_CHECK(
      context,
      _check__empty_out_dim_order(dim_order, out),
      InvalidArgument,
      out);

  // Resize for dynamic shape.
  ET_KERNEL_CHECK_MSG(
      context,
      resize_tensor(out, size) == Error::Ok,
      InvalidArgument,
      out,
      "Failed to resize output tensor.");

  return out;
}

} // namespace native
} // namespace executor
} // namespace torch
5 changes: 5 additions & 0 deletions kernels/portable/functions.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -937,6 +937,11 @@
- arg_meta: null
kernel_name: torch::executor::zeros_out

# Dim-order variant of empty.out; portable kernel lives in
# kernels/portable/cpu/op__empty_dim_order.cpp.
- func: dim_order_ops::_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_empty_dim_order_out

- func: dim_order_ops::_to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
kernels:
- arg_meta: null
Expand Down
161 changes: 161 additions & 0 deletions kernels/test/op__empty_dim_order_test.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,161 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>

#include <gtest/gtest.h>

using namespace ::testing;
using exec_aten::DimOrderType;
using exec_aten::IntArrayRef;
using exec_aten::optional;
using exec_aten::OptionalArrayRef;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

// Test fixture for the dim_order_ops::_empty_dim_order.out operator.
class OpEmptyDimOrderOutTest : public OperatorTest {
 protected:
  // Forwards to the generated operator entry point with the fixture-owned
  // runtime context.
  Tensor& op_empty_dim_order_out(
      IntArrayRef size,
      OptionalArrayRef<int64_t> dim_order,
      Tensor& out) {
    return torch::executor::dim_order_ops::_empty_dim_order_outf(
        context_, size, dim_order, out);
  }

  // Smoke test: call the op with no dim_order on an out tensor that already
  // has the requested sizes. Only verifies the call completes.
  template <ScalarType DTYPE>
  void test_op_empty_dim_order_out(std::vector<int32_t>&& size_int32_t) {
    TensorFactory<DTYPE> tf;
    std::vector<int64_t> sizes(size_int32_t.begin(), size_int32_t.end());
    auto aref = exec_aten::ArrayRef<int64_t>(sizes.data(), sizes.size());
    OptionalArrayRef<int64_t> dim_order;
    Tensor out = tf.ones(size_int32_t);

    op_empty_dim_order_out(aref, dim_order, out);
  }

  // dim_order has fewer entries than the tensor's rank — kernel must fail.
  // NOTE(review): none of these *_die helpers are invoked by a TEST_F in
  // this file — confirm whether corresponding death tests were intended.
  void too_short_dim_order_die() {
    TensorFactory<ScalarType::Float> tf;

    int64_t sizes[3] = {3, 2, 4};
    auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

    int64_t raw_dim_order[2] = {0, 1};
    auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
    Tensor out =
        tf.ones({3, 2, 4}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
    ET_EXPECT_KERNEL_FAILURE(
        context_, op_empty_dim_order_out(sizes_aref, dim_order, out));
  }

  // dim_order {1, 2} is not a valid permutation of {0, 1} — kernel must fail.
  void illegal_dim_order_die() {
    TensorFactory<ScalarType::Float> tf;

    int64_t sizes[2] = {3, 2};
    auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

    int64_t raw_dim_order[2] = {1, 2};
    auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
    Tensor out =
        tf.ones({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
    ET_EXPECT_KERNEL_FAILURE(
        context_, op_empty_dim_order_out(sizes_aref, dim_order, out));
  }

  // A contiguous dim_order is supplied but out is channels-last — the
  // mismatch must fail the kernel.
  void wrong_dim_order_die() {
    TensorFactory<ScalarType::Float> tf;

    int64_t sizes[4] = {3, 2, 4, 5};
    auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

    // should be {0, 2, 3, 1}
    int64_t raw_dim_order[4] = {0, 1, 2, 3};
    auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
    Tensor out = tf.full_channels_last(
        {3, 2, 4, 5}, 1, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
    ET_EXPECT_KERNEL_FAILURE(
        context_, op_empty_dim_order_out(sizes_aref, dim_order, out));
  }
};

// Instantiates the size-coverage smoke test (3-D shape, shape containing a
// zero dim, and scalar/empty shape) for every real dtype plus Bool.
#define GENERATE_TEST(_, DTYPE)                            \
  TEST_F(OpEmptyDimOrderOutTest, DTYPE##Tensors) {         \
    test_op_empty_dim_order_out<ScalarType::DTYPE>({2, 3, 4}); \
    test_op_empty_dim_order_out<ScalarType::DTYPE>({2, 0, 4}); \
    test_op_empty_dim_order_out<ScalarType::DTYPE>({});        \
  }

ET_FORALL_REAL_TYPES_AND(Bool, GENERATE_TEST)

// Requested size equals the dynamic-bound out tensor's current size; the
// op should succeed without any resize.
TEST_F(OpEmptyDimOrderOutTest, DynamicShapeUpperBoundSameAsExpected) {
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[2] = {3, 2};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);
  OptionalArrayRef<int64_t> dim_order;
  Tensor out =
      tf.ones({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}

// An explicit contiguous dim_order matching the out tensor should succeed.
// (Test name typo fixed: "Succees" -> "Succeeds".)
TEST_F(OpEmptyDimOrderOutTest, ContiguousDimOrderSucceeds) {
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[2] = {3, 2};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

  int64_t raw_dim_order[2] = {0, 1};
  auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
  Tensor out =
      tf.ones({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}

// An explicit channels-last dim_order {0, 2, 3, 1} matching a channels-last
// out tensor should succeed.
// (Test name typos fixed: "ChannelsLastsDimOrderSuccees" ->
// "ChannelsLastDimOrderSucceeds".)
TEST_F(OpEmptyDimOrderOutTest, ChannelsLastDimOrderSucceeds) {
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[4] = {3, 2, 4, 5};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

  int64_t raw_dim_order[4] = {0, 2, 3, 1};
  auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
  Tensor out = tf.full_channels_last(
      {3, 2, 4, 5}, 1, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}

// Requested size is smaller than the dynamic-bound out tensor's current
// size; the op should resize down within the bound and succeed.
TEST_F(OpEmptyDimOrderOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[2] = {3, 2};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);
  OptionalArrayRef<int64_t> dim_order;
  Tensor out =
      tf.ones({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}

// Requested size exceeds the out tensor's current size with unbound
// dynamism; requires kernels that support output resize, so skip otherwise.
TEST_F(OpEmptyDimOrderOutTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape unbound not supported";
  }
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[2] = {3, 2};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);
  OptionalArrayRef<int64_t> dim_order;
  Tensor out =
      tf.ones({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}
1 change: 1 addition & 0 deletions kernels/test/targets.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -174,6 +174,7 @@ def define_common_targets():
codegen_function_header_wrapper("executorch/kernels/test/custom_kernel_example", "custom_kernel_example")

_common_op_test("op__to_dim_order_copy_test", ["aten", "portable"])
_common_op_test("op__empty_dim_order_test", ["aten", "portable"])
_common_op_test("op_abs_test", ["aten", "portable"])
_common_op_test("op_acos_test", ["aten", "portable"])
_common_op_test("op_acosh_test", ["aten", "portable"])
Expand Down
Loading

0 comments on commit c0de556

Please sign in to comment.