Skip to content

Commit

Permalink
rm disp
Browse files Browse the repository at this point in the history
Signed-off-by: Liqun Fu <[email protected]>
  • Loading branch information
liqunfu committed Oct 4, 2023
1 parent 7f02b6a commit 8abffad
Showing 1 changed file with 31 additions and 38 deletions.
69 changes: 31 additions & 38 deletions onnxruntime/core/providers/cpu/tensor/affine_grid.cc
Original file line number Diff line number Diff line change
Expand Up @@ -71,44 +71,39 @@ void generate_base_grid_3d(int64_t D, int64_t H, int64_t W, bool align_corners,
}

template <typename T>
struct AffineGridGenerator2D {
  // Applies one batch's 2x3 affine transform (theta) to the shared base grid
  // and writes the resulting (H*W, 2) sampling grid into that batch's slice
  // of `grid`. theta is laid out per batch as [R | t] with R 2x2 and t 2x1.
  void operator()(const Tensor* theta, const Eigen::Matrix<T, 2, Eigen::Dynamic>& base_grid_transposed, int64_t batch_num, int64_t H, int64_t W, Tensor* grid) const {
    constexpr Eigen::StorageOptions kLayout = Eigen::RowMajor;

    // Each batch owns one row-major 2x3 theta matrix (6 scalars).
    const T* theta_ptr = theta->Data<T>() + batch_num * 2 * 3;
    const Eigen::Matrix<T, 2, 2, kLayout> rotation{{theta_ptr[0], theta_ptr[1]}, {theta_ptr[3], theta_ptr[4]}};
    const Eigen::Array<T, 2, 1> translation(theta_ptr[2], theta_ptr[5]);

    // Each batch owns H*W output points, 2 coordinates each.
    T* out_ptr = grid->MutableData<T>() + batch_num * H * W * 2;
    Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, 2, kLayout>> out(out_ptr, narrow<size_t>(H * W), 2);
    out = ((rotation * base_grid_transposed).array().colwise() + translation).matrix().transpose();
  }
};
// Applies one batch's 2x3 affine transform (theta) to the shared base grid and
// writes the resulting (H*W, 2) sampling grid into that batch's slice of `grid`.
//
// theta:                 theta tensor; per batch a row-major 2x3 matrix [R | t].
// base_grid_transposed:  2 x (H*W) matrix of base (x, y) coordinates.
// batch_num:             index of the batch slice to fill.
// H, W:                  spatial dimensions of the output grid.
// grid:                  output tensor, written at offset batch_num * H * W * 2.
template <typename T>
void affine_grid_generator_2d(const Tensor* theta,
                              const Eigen::Matrix<T, 2, Eigen::Dynamic>& base_grid_transposed,
                              int64_t batch_num, int64_t H, int64_t W, Tensor* grid) {
  const Eigen::StorageOptions option = Eigen::RowMajor;
  auto theta_batch_offset = batch_num * 2 * 3;
  const T* theta_data = theta->Data<T>() + theta_batch_offset;
  // theta_R is the 2x2 linear part; theta_T is the 2x1 translation column.
  const Eigen::Matrix<T, 2, 2, option> theta_R{{theta_data[0], theta_data[1]}, {theta_data[3], theta_data[4]}};
  const Eigen::Array<T, 2, 1> theta_T(theta_data[2], theta_data[5]);

  auto grid_batch_offset = batch_num * H * W * 2;
  T* grid_data = grid->MutableData<T>() + grid_batch_offset;
  Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, 2, option>> grid_matrix(grid_data, narrow<size_t>(H * W), 2);
  grid_matrix = ((theta_R * base_grid_transposed).array().colwise() + theta_T).matrix().transpose();
}

template <typename T>
struct AffineGridGenerator3D {
  // Applies one batch's 3x4 affine transform (theta) to the shared base grid
  // and writes the resulting (D*H*W, 3) sampling grid into that batch's slice
  // of `grid`. theta is laid out per batch as [R | t] with R 3x3 and t 3x1.
  // operator() is now const-qualified, matching AffineGridGenerator2D.
  void operator()(const Tensor* theta, const Eigen::Matrix<T, 3, Eigen::Dynamic>& base_grid_transposed,
                  int64_t batch_num, int64_t D, int64_t H, int64_t W, Tensor* grid) const {
    const Eigen::StorageOptions option = Eigen::RowMajor;
    auto theta_batch_offset = batch_num * 3 * 4;
    const T* theta_data = theta->Data<T>() + theta_batch_offset;
    // theta_R is the 3x3 linear part; theta_T is the 3x1 translation column.
    const Eigen::Matrix<T, 3, 3, option> theta_R{
        {theta_data[0], theta_data[1], theta_data[2]},
        {theta_data[4], theta_data[5], theta_data[6]},
        {theta_data[8], theta_data[9], theta_data[10]}};
    const Eigen::Array<T, 3, 1> theta_T(theta_data[3], theta_data[7], theta_data[11]);

    auto grid_batch_offset = batch_num * D * H * W * 3;
    T* grid_data = grid->MutableData<T>() + grid_batch_offset;
    Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, 3, option>> grid_matrix(grid_data, narrow<size_t>(D * H * W), 3);
    grid_matrix = ((theta_R * base_grid_transposed).array().colwise() + theta_T).matrix().transpose();
  }
};
// Applies one batch's 3x4 affine transform (theta) to the shared base grid and
// writes the resulting (D*H*W, 3) sampling grid into that batch's slice of `grid`.
//
// theta:                 theta tensor; per batch a row-major 3x4 matrix [R | t].
// base_grid_transposed:  3 x (D*H*W) matrix of base (x, y, z) coordinates.
// batch_num:             index of the batch slice to fill.
// D, H, W:               spatial dimensions of the output grid.
// grid:                  output tensor, written at offset batch_num * D * H * W * 3.
template <typename T>
void affine_grid_generator_3d(const Tensor* theta,
                              const Eigen::Matrix<T, 3, Eigen::Dynamic>& base_grid_transposed,
                              int64_t batch_num, int64_t D, int64_t H, int64_t W, Tensor* grid) {
  const Eigen::StorageOptions option = Eigen::RowMajor;
  auto theta_batch_offset = batch_num * 3 * 4;
  const T* theta_data = theta->Data<T>() + theta_batch_offset;
  // theta_R is the 3x3 linear part; theta_T is the 3x1 translation column.
  const Eigen::Matrix<T, 3, 3, option> theta_R{
      {theta_data[0], theta_data[1], theta_data[2]},
      {theta_data[4], theta_data[5], theta_data[6]},
      {theta_data[8], theta_data[9], theta_data[10]}};
  const Eigen::Array<T, 3, 1> theta_T(theta_data[3], theta_data[7], theta_data[11]);

  auto grid_batch_offset = batch_num * D * H * W * 3;
  T* grid_data = grid->MutableData<T>() + grid_batch_offset;
  Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, 3, option>> grid_matrix(grid_data, narrow<size_t>(D * H * W), 3);
  grid_matrix = ((theta_R * base_grid_transposed).array().colwise() + theta_T).matrix().transpose();
}

template <typename T>
Status AffineGrid<T>::Compute(OpKernelContext* context) const {
const Tensor* theta = context->Input<Tensor>(0);
const auto elem_type = theta->GetElementType();
const TensorShape& theta_shape = theta->Shape();
if (theta_shape.NumDimensions() != 3) {
return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "AffineGrid : Input theta tensor dimension is not 3");
Expand All @@ -128,9 +123,8 @@ Status AffineGrid<T>::Compute(OpKernelContext* context) const {
generate_base_grid_2d(H, W, align_corners_, base_grid);
Eigen::Matrix<T, 2, Eigen::Dynamic> base_grid_transposed = base_grid.transpose();

std::function<void(ptrdiff_t)> fn = [elem_type, theta, base_grid_transposed, H, W, grid](ptrdiff_t batch_num) {
utils::MLTypeCallDispatcher<T> t_disp(elem_type);
t_disp.Invoke<AffineGridGenerator2D>(theta, base_grid_transposed, batch_num, H, W, grid);
std::function<void(ptrdiff_t)> fn = [theta, base_grid_transposed, H, W, grid](ptrdiff_t batch_num) {
affine_grid_generator_2d(theta, base_grid_transposed, batch_num, H, W, grid);
};

concurrency::ThreadPool::TryBatchParallelFor(context->GetOperatorThreadPool(), narrow<size_t>(N), std::move(fn), 0);
Expand All @@ -144,9 +138,8 @@ Status AffineGrid<T>::Compute(OpKernelContext* context) const {
generate_base_grid_3d(D, H, W, align_corners_, base_grid);
Eigen::Matrix<T, 3, Eigen::Dynamic> base_grid_transposed = base_grid.transpose();

std::function<void(ptrdiff_t)> fn = [elem_type, theta, base_grid_transposed, D, H, W, grid](ptrdiff_t batch_num) {
utils::MLTypeCallDispatcher<T> t_disp(elem_type);
t_disp.Invoke<AffineGridGenerator3D>(theta, base_grid_transposed, batch_num, D, H, W, grid);
std::function<void(ptrdiff_t)> fn = [theta, base_grid_transposed, D, H, W, grid](ptrdiff_t batch_num) {
affine_grid_generator_3d(theta, base_grid_transposed, batch_num, D, H, W, grid);
};

concurrency::ThreadPool::TryBatchParallelFor(context->GetOperatorThreadPool(), narrow<size_t>(N), std::move(fn), 0);

Check warning on line 145 in onnxruntime/core/providers/cpu/tensor/affine_grid.cc

View workflow job for this annotation

GitHub Actions / cpplint

[cpplint] onnxruntime/core/providers/cpu/tensor/affine_grid.cc#L145

Add #include <utility> for move [build/include_what_you_use] [4]
Raw output
onnxruntime/core/providers/cpu/tensor/affine_grid.cc:145:  Add #include <utility> for move  [build/include_what_you_use] [4]
Expand Down

0 comments on commit 8abffad

Please sign in to comment.