-
Notifications
You must be signed in to change notification settings - Fork 389
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[et][dim order] dim order variant empty operator
Pull Request resolved: #7154 ghstack-source-id: 256177391 @exported-using-ghexport This diff introduces a dim order variant of the empty operator, to replace the original empty operator when using dim order in ExecuTorch Differential Revision: [D66683250](https://our.internmc.facebook.com/intern/diff/D66683250/)
- Loading branch information
Showing
8 changed files
with
373 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,110 @@ | ||
/* | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* All rights reserved. | ||
* | ||
* This source code is licensed under the BSD-style license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
*/ | ||
|
||
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h> | ||
#include <executorch/runtime/kernel/kernel_includes.h> | ||
|
||
#include <cstdint> | ||
#include <cstring> | ||
|
||
namespace torch { | ||
namespace executor { | ||
namespace native { | ||
|
||
using exec_aten::IntArrayRef; | ||
using exec_aten::Tensor; | ||
using OptionalIntArrayRef = exec_aten::OptionalArrayRef<int64_t>; | ||
using DimOrderArrayRef = exec_aten::ArrayRef<executorch::aten::DimOrderType>; | ||
// Upper bound on tensor rank supported here: sized for the stack scratch
// buffer used when deriving strides from a dim order. `constexpr` (rather
// than `const`) makes it usable as an array bound without question.
constexpr size_t kMaxNumOfDimensions = 16;
|
||
namespace { | ||
|
||
/*
 * Validates that `out` is laid out according to `dim_order`, or according to
 * the contiguous dim order when `dim_order` is not provided.
 *
 * @param dim_order Optional requested dim order; when present it must be
 *     either the contiguous or the channels-last dim order.
 * @param out Out tensor whose rank and strides are checked against the
 *     strides implied by the effective dim order.
 * @return true iff every check passes; logs and returns false otherwise.
 */
inline bool _check__empty_out_dim_order(
    OptionalIntArrayRef dim_order,
    Tensor& out) {
  exec_aten::ArrayRef<int64_t> dim_order_ref;
  std::vector<int64_t> dim_order_vec;

  if (dim_order.has_value()) {
    // out tensor's dim order shall equal to input dim order
    dim_order_ref = exec_aten::ArrayRef<int64_t>(
        dim_order.value().data(), dim_order.value().size());
  } else { // dim_order is not set, out tensor should be contiguous dim order
    for (int i = 0; i < out.dim(); i++) {
      dim_order_vec.push_back(i);
    }
    dim_order_ref = exec_aten::ArrayRef<int64_t>(dim_order_vec);
  }

  // dim order size shall equal to input dim. Cast avoids a signed/unsigned
  // comparison warning: Tensor::dim() is signed, size() is size_t.
  ET_LOG_AND_RETURN_IF_FALSE(
      dim_order_ref.size() == static_cast<size_t>(out.dim()));

  // Only the contiguous and channels-last memory formats are supported.
  ET_LOG_AND_RETURN_IF_FALSE(
      is_channels_last_dim_order(dim_order_ref.data(), dim_order_ref.size()) ||
      is_contiguous_dim_order(dim_order_ref.data(), dim_order_ref.size()));

  // Out tensor shall have the same memory-format strides as dim_order.
  // Rank must fit the fixed-size scratch buffer (cast for sign, as above).
  ET_LOG_AND_RETURN_IF_FALSE(
      static_cast<size_t>(out.dim()) <= kMaxNumOfDimensions);
  exec_aten::StridesType target_strides[kMaxNumOfDimensions];
  dim_order_to_stride_nocheck(
      out.sizes().data(),
      dim_order_ref.data(),
      dim_order_ref.size(),
      target_strides);

  for (size_t i = 0; i < dim_order_ref.size(); i++) {
    ET_LOG_AND_RETURN_IF_FALSE(target_strides[i] == out.strides()[i]);
  }

  return true;
}
|
||
} // namespace | ||
|
||
/* | ||
* Empty out tensor with specified dim order | ||
* | ||
* _empty_dim_order.out(SymInt[] size, *, int[]? dim_order=None, Tensor(a!) out) | ||
* -> Tensor(a!) | ||
*/ | ||
/*
 * Empty out tensor with specified dim order
 *
 * _empty_dim_order.out(SymInt[] size, *, int[]? dim_order=None, Tensor(a!) out)
 * -> Tensor(a!)
 *
 * Validates the requested dim order against `out`, then resizes `out` to
 * `size`. The element data is left uninitialized ("empty" semantics).
 */
Tensor& _empty_dim_order_out(
    KernelRuntimeContext& context,
    IntArrayRef size,
    OptionalIntArrayRef dim_order,
    Tensor& out) {
  // NOTE: the original `(void)context;` is removed — `context` is in fact
  // used by both kernel-check macros below.

  // Check if dim_order is valid; fail the kernel with InvalidArgument if not.
  ET_KERNEL_CHECK(
      context,
      _check__empty_out_dim_order(dim_order, out),
      InvalidArgument,
      out);

  // Resize for dynamic shape
  ET_KERNEL_CHECK_MSG(
      context,
      resize_tensor(out, size) == Error::Ok,
      InvalidArgument,
      out,
      "Failed to resize output tensor.");

  return out;
}
|
||
// Context-free convenience overload: forwards to the context-taking variant
// above with a default-constructed KernelRuntimeContext.
Tensor& _empty_dim_order_out(
    IntArrayRef size,
    OptionalIntArrayRef dim_order,
    Tensor& out) {
  executorch::runtime::KernelRuntimeContext default_context{};
  return _empty_dim_order_out(default_context, size, dim_order, out);
}
|
||
} // namespace native | ||
} // namespace executor | ||
} // namespace torch |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,82 @@ | ||
/* | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* All rights reserved. | ||
* | ||
* This source code is licensed under the BSD-style license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
*/ | ||
|
||
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h> | ||
#include <executorch/runtime/kernel/kernel_includes.h> | ||
|
||
#include <cstdint> | ||
#include <cstring> | ||
|
||
namespace torch { | ||
namespace executor { | ||
namespace native { | ||
|
||
using exec_aten::Tensor; | ||
using OptionalIntArrayRef = exec_aten::OptionalArrayRef<int64_t>; | ||
using DimOrderArrayRef = exec_aten::ArrayRef<executorch::aten::DimOrderType>; | ||
|
||
namespace { | ||
|
||
// Checks that `out` already carries the dim order requested by `dim_order`
// (which must be contiguous or channels-last), or the contiguous dim order
// when `dim_order` is absent. Logs and returns false on any mismatch.
bool _check__empty_out_dim_order(OptionalIntArrayRef dim_order, Tensor& out) {
  DimOrderArrayRef out_dim_order = out.dim_order();

  if (!dim_order.has_value()) {
    // dim_order is not set: out tensor should be in contiguous memory format.
    ET_LOG_AND_RETURN_IF_FALSE(
        is_contiguous_dim_order(out_dim_order.data(), out_dim_order.size()));
    return true;
  }

  // A dim order was requested: it must be one of the two supported formats.
  IntArrayRef wanted_order = dim_order.value();
  ET_LOG_AND_RETURN_IF_FALSE(
      is_channels_last_dim_order(wanted_order.data(), wanted_order.size()) ||
      is_contiguous_dim_order(wanted_order.data(), wanted_order.size()));

  // Out tensor shall have the same dim order as requested, element-wise.
  ET_LOG_AND_RETURN_IF_FALSE(out_dim_order.size() == wanted_order.size());
  for (size_t d = 0; d < wanted_order.size(); d++) {
    ET_LOG_AND_RETURN_IF_FALSE(out_dim_order[d] == wanted_order[d]);
  }
  return true;
}
|
||
} // namespace | ||
|
||
/* | ||
* Empty out tensor with specified dim order | ||
* | ||
* _empty_dim_order.out(SymInt[] size, *, int[]? dim_order=None, Tensor(a!) out) | ||
* -> Tensor(a!) | ||
*/ | ||
/*
 * Empty out tensor with specified dim order
 *
 * _empty_dim_order.out(SymInt[] size, *, int[]? dim_order=None, Tensor(a!) out)
 * -> Tensor(a!)
 *
 * Validates the requested dim order against `out`, then resizes `out` to
 * `size`. The element data is left uninitialized ("empty" semantics).
 */
Tensor& _empty_dim_order_out(
    KernelRuntimeContext& context,
    IntArrayRef size,
    OptionalIntArrayRef dim_order,
    Tensor& out) {
  // Check if dim_order is valid. BUG FIX: the return value of
  // _check__empty_out_dim_order was previously discarded, so an invalid dim
  // order was never reported. Fail the kernel with InvalidArgument instead,
  // matching the portable kernel variant of this operator.
  ET_KERNEL_CHECK(
      context,
      _check__empty_out_dim_order(dim_order, out),
      InvalidArgument,
      out);

  // Resize for dynamic shape
  ET_KERNEL_CHECK_MSG(
      context,
      resize_tensor(out, size) == Error::Ok,
      InvalidArgument,
      out,
      "Failed to resize output tensor.");

  return out;
}
|
||
} // namespace native | ||
} // namespace executor | ||
} // namespace torch |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,161 @@ | ||
/* | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* All rights reserved. | ||
* | ||
* This source code is licensed under the BSD-style license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
*/ | ||
|
||
#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator | ||
#include <executorch/kernels/test/TestUtil.h> | ||
#include <executorch/kernels/test/supported_features.h> | ||
#include <executorch/runtime/core/exec_aten/exec_aten.h> | ||
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h> | ||
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h> | ||
|
||
#include <gtest/gtest.h> | ||
|
||
using namespace ::testing; | ||
using exec_aten::DimOrderType; | ||
using exec_aten::IntArrayRef; | ||
using exec_aten::optional; | ||
using exec_aten::OptionalArrayRef; | ||
using exec_aten::ScalarType; | ||
using exec_aten::Tensor; | ||
using torch::executor::testing::TensorFactory; | ||
|
||
class OpEmptyDimOrderOutTest : public OperatorTest { | ||
protected: | ||
Tensor& op_empty_dim_order_out( | ||
IntArrayRef size, | ||
OptionalArrayRef<int64_t> dim_order, | ||
Tensor& out) { | ||
return torch::executor::dim_order_ops::_empty_dim_order_outf( | ||
context_, size, dim_order, out); | ||
} | ||
|
||
template <ScalarType DTYPE> | ||
void test_op_empty_dim_order_out(std::vector<int32_t>&& size_int32_t) { | ||
TensorFactory<DTYPE> tf; | ||
std::vector<int64_t> sizes(size_int32_t.begin(), size_int32_t.end()); | ||
auto aref = exec_aten::ArrayRef<int64_t>(sizes.data(), sizes.size()); | ||
OptionalArrayRef<int64_t> dim_order; | ||
Tensor out = tf.ones(size_int32_t); | ||
|
||
op_empty_dim_order_out(aref, dim_order, out); | ||
} | ||
|
||
void too_short_dim_order_die() { | ||
TensorFactory<ScalarType::Float> tf; | ||
|
||
int64_t sizes[3] = {3, 2, 4}; | ||
auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes); | ||
|
||
int64_t raw_dim_order[2] = {0, 1}; | ||
auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order); | ||
Tensor out = | ||
tf.ones({3, 2, 4}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND); | ||
ET_EXPECT_KERNEL_FAILURE( | ||
context_, op_empty_dim_order_out(sizes_aref, dim_order, out)); | ||
} | ||
|
||
void illegal_dim_order_die() { | ||
TensorFactory<ScalarType::Float> tf; | ||
|
||
int64_t sizes[2] = {3, 2}; | ||
auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes); | ||
|
||
int64_t raw_dim_order[2] = {1, 2}; | ||
auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order); | ||
Tensor out = | ||
tf.ones({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND); | ||
ET_EXPECT_KERNEL_FAILURE( | ||
context_, op_empty_dim_order_out(sizes_aref, dim_order, out)); | ||
} | ||
|
||
void wrong_dim_order_die() { | ||
TensorFactory<ScalarType::Float> tf; | ||
|
||
int64_t sizes[4] = {3, 2, 4, 5}; | ||
auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes); | ||
|
||
// should be {0, 2, 3, 1} | ||
int64_t raw_dim_order[4] = {0, 1, 2, 3}; | ||
auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order); | ||
Tensor out = tf.full_channels_last( | ||
{3, 2, 4, 5}, 1, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND); | ||
ET_EXPECT_KERNEL_FAILURE( | ||
context_, op_empty_dim_order_out(sizes_aref, dim_order, out)); | ||
} | ||
}; | ||
|
||
// Instantiates one smoke test per real dtype (plus Bool): run the op with an
// unset dim order over a regular shape, a shape with a zero-sized dim, and a
// rank-0 (scalar) shape.
#define GENERATE_TEST(_, DTYPE)                                \
  TEST_F(OpEmptyDimOrderOutTest, DTYPE##Tensors) {             \
    test_op_empty_dim_order_out<ScalarType::DTYPE>({2, 3, 4}); \
    test_op_empty_dim_order_out<ScalarType::DTYPE>({2, 0, 4}); \
    test_op_empty_dim_order_out<ScalarType::DTYPE>({});        \
  }

ET_FORALL_REAL_TYPES_AND(Bool, GENERATE_TEST)
|
||
// With no dim order given, resizing to exactly the out tensor's bound
// succeeds.
TEST_F(OpEmptyDimOrderOutTest, DynamicShapeUpperBoundSameAsExpected) {
  TensorFactory<ScalarType::Float> tf;

  int64_t requested[2] = {3, 2};
  auto requested_ref = exec_aten::ArrayRef<int64_t>(requested);
  OptionalArrayRef<int64_t> unset_dim_order;
  Tensor out =
      tf.ones({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(requested_ref, unset_dim_order, out);
}
|
||
// An explicitly contiguous dim order on a contiguous out tensor succeeds.
// (Fixes test-name typo: "Succees" -> "Success".)
TEST_F(OpEmptyDimOrderOutTest, ContiguousDimOrderSuccess) {
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[2] = {3, 2};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

  int64_t raw_dim_order[2] = {0, 1};
  auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
  Tensor out =
      tf.ones({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}
|
||
// A channels-last dim order on a channels-last out tensor succeeds.
// (Fixes test-name typos: "ChannelsLasts...Succees" -> "ChannelsLast...Success".)
TEST_F(OpEmptyDimOrderOutTest, ChannelsLastDimOrderSuccess) {
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[4] = {3, 2, 4, 5};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

  int64_t raw_dim_order[4] = {0, 2, 3, 1};
  auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
  Tensor out = tf.full_channels_last(
      {3, 2, 4, 5}, 1, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}
|
||
// With no dim order given, resizing below the out tensor's larger bound
// succeeds.
TEST_F(OpEmptyDimOrderOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  TensorFactory<ScalarType::Float> tf;

  int64_t requested[2] = {3, 2};
  auto requested_ref = exec_aten::ArrayRef<int64_t>(requested);
  OptionalArrayRef<int64_t> unset_dim_order;
  Tensor out =
      tf.ones({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(requested_ref, unset_dim_order, out);
}
|
||
// Fully unbound dynamic shape: only runs when the kernel supports resizing
// outputs past their initial size.
TEST_F(OpEmptyDimOrderOutTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape unbound not supported";
  }
  TensorFactory<ScalarType::Float> tf;

  int64_t requested[2] = {3, 2};
  auto requested_ref = exec_aten::ArrayRef<int64_t>(requested);
  OptionalArrayRef<int64_t> unset_dim_order;
  Tensor out =
      tf.ones({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  op_empty_dim_order_out(requested_ref, unset_dim_order, out);
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.