Skip to content

Commit

Permalink
Address review comments.
Browse files Browse the repository at this point in the history
  • Loading branch information
yuslepukhin committed Jan 24, 2024
1 parent 0c4cb26 commit fb5245d
Show file tree
Hide file tree
Showing 6 changed files with 13 additions and 11 deletions.
2 changes: 1 addition & 1 deletion onnxruntime/core/providers/cpu/cpu_provider_shared.cc
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ struct ProviderHostCPUImpl : ProviderHostCPU {
// From cpu/tensor/padbase.h (direct)
Status PadBase__HandleDimValueZero(const Mode& mode, const TensorShape& input_shape, const TensorShape& output_shape) override { return PadBase::HandleDimValueZero(mode, input_shape, output_shape); }

Check warning on line 90 in onnxruntime/core/providers/cpu/cpu_provider_shared.cc

View workflow job for this annotation

GitHub Actions / cpplint

[cpplint] onnxruntime/core/providers/cpu/cpu_provider_shared.cc#L90

Lines should be <= 120 characters long [whitespace/line_length] [2]
Raw output
onnxruntime/core/providers/cpu/cpu_provider_shared.cc:90:  Lines should be <= 120 characters long  [whitespace/line_length] [2]

void PadBase__ComputePads(OpKernelContext* ctx, size_t data_rank, gsl::span<const int64_t> pads_data,
void PadBase__ComputePads(OpKernelContext& ctx, size_t data_rank, gsl::span<const int64_t> pads_data,
PadsVector& pads) override {
PadBase::ComputePads(ctx, data_rank, pads_data, pads);
}
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/core/providers/cpu/cpu_provider_shared.h
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ struct ProviderHostCPU {
// From cpu/tensor/padbase.h
virtual Status PadBase__HandleDimValueZero(const Mode& mode, const TensorShape& input_shape, const TensorShape& output_shape) = 0;

Check warning on line 49 in onnxruntime/core/providers/cpu/cpu_provider_shared.h

View workflow job for this annotation

GitHub Actions / cpplint

[cpplint] onnxruntime/core/providers/cpu/cpu_provider_shared.h#L49

Lines should be <= 120 characters long [whitespace/line_length] [2]
Raw output
onnxruntime/core/providers/cpu/cpu_provider_shared.h:49:  Lines should be <= 120 characters long  [whitespace/line_length] [2]

virtual void PadBase__ComputePads(OpKernelContext* ctx, size_t data_rank, gsl::span<const int64_t> pads_data,
virtual void PadBase__ComputePads(OpKernelContext& ctx, size_t data_rank, gsl::span<const int64_t> pads_data,
PadsVector& pads) = 0;

// From cpu/tensor/split.h
Expand Down
8 changes: 5 additions & 3 deletions onnxruntime/core/providers/cpu/tensor/pad.cc
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@
#include "core/providers/op_kernel_type_control.h"
#include "core/util/math.h"

#include <functional>

Check warning on line 12 in onnxruntime/core/providers/cpu/tensor/pad.cc

View workflow job for this annotation

GitHub Actions / cpplint

[cpplint] onnxruntime/core/providers/cpu/tensor/pad.cc#L12

Found C++ system header after other header. Should be: pad.h, c system, c++ system, other. [build/include_order] [4]
Raw output
onnxruntime/core/providers/cpu/tensor/pad.cc:12:  Found C++ system header after other header. Should be: pad.h, c system, c++ system, other.  [build/include_order] [4]

// there's no way to use a raw pointer as the copy destination with std::copy_n
// (which gsl::copy uses with span::data() which returns a raw pointer) with the 14.11 toolset
// without generating a 4996 warning. going through an iterator is way too much overhead so turn off the warning.
Expand Down Expand Up @@ -215,10 +217,10 @@ static void ComputePadWithAxes(
}
}

void PadBase::ComputePads(OpKernelContext* ctx, size_t data_rank, gsl::span<const int64_t> pads_data,
void PadBase::ComputePads(OpKernelContext& ctx, size_t data_rank, gsl::span<const int64_t> pads_data,
PadsVector& pads) {
pads.reserve(2 * data_rank);
const Tensor* axes_tensor = ctx->Input<Tensor>(3);
const Tensor* axes_tensor = ctx.Input<Tensor>(3);
if (axes_tensor) {
const size_t num_axes_dims = axes_tensor->Shape().NumDimensions();
ORT_ENFORCE(num_axes_dims == 1, "Axes tensor should be a 1D tensor ");
Expand Down Expand Up @@ -652,7 +654,7 @@ Status Pad::Compute(OpKernelContext* ctx) const {
const auto pads_data = pads_tensor.DataAsSpan<int64_t>();

// Compute Pads by applying axes if specified otherwise copy the supplied pads.
PadBase::ComputePads(ctx, data_rank, pads_data, pads);
PadBase::ComputePads(*ctx, data_rank, pads_data, pads);

// Separate out any negative pads into the slices array
PadBase::SeparateNegativeToSlices(pads, slices);
Expand Down
8 changes: 4 additions & 4 deletions onnxruntime/core/providers/cpu/tensor/padbase.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
#pragma once

#include "core/common/inlined_containers.h"
#include <functional>

namespace onnxruntime {

Expand All @@ -23,8 +22,9 @@ class PadBase {
// The following several functions are shared among the providers

/// <summary>
/// Update the output_shape to make it consistent with numpy handling where there are one or more dimensions
/// in the input_shape with a value of zero.
/// Handle the case when the input shape has zero dim values.
/// Depending on the mode, the input dim with zero value must match the output dim value.
///

Check warning on line 27 in onnxruntime/core/providers/cpu/tensor/padbase.h

View workflow job for this annotation

GitHub Actions / cpplint

[cpplint] onnxruntime/core/providers/cpu/tensor/padbase.h#L27

Line ends in whitespace. Consider deleting these extra spaces. [whitespace/end_of_line] [4]
Raw output
onnxruntime/core/providers/cpu/tensor/padbase.h:27:  Line ends in whitespace.  Consider deleting these extra spaces.  [whitespace/end_of_line] [4]
/// </summary>
/// <param name="mode">Padding mode enum value</param>
/// <param name="input_shape">actual input shape</param>
Expand All @@ -43,7 +43,7 @@ class PadBase {
/// <param name="data_rank">input rank</param>
/// <param name="pads_data">pads data from pads input</param>
/// <param name="pads">resulting pads</param>
static void ComputePads(OpKernelContext* ctx, size_t data_rank, gsl::span<const int64_t> pads_data,
static void ComputePads(OpKernelContext& ctx, size_t data_rank, gsl::span<const int64_t> pads_data,
PadsVector& pads);

/// <summary>
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/core/providers/cuda/tensor/pad.cc
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ Status Pad<T>::ComputeInternal(OpKernelContext* ctx) const {

const auto pads_data = pads_tensor.DataAsSpan<int64_t>();

PadBase::ComputePads(ctx, input_shape.NumDimensions(), pads_data, pads);
PadBase::ComputePads(*ctx, input_shape.NumDimensions(), pads_data, pads);

// Separate out any negative pads into the slices array
PadBase::SeparateNegativeToSlices(pads, slices);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -551,7 +551,7 @@ Status PadBase::HandleDimValueZero(const Mode& mode, const TensorShape& input_sh
return g_host_cpu.PadBase__HandleDimValueZero(mode, input_shape, output_shape);
}

void PadBase::ComputePads(OpKernelContext* ctx, size_t data_rank, gsl::span<const int64_t> pads_data,
// Shared-provider bridge: forwards to the CPU provider's implementation via the
// g_host_cpu dispatch table, so non-CPU providers can reuse the CPU pads logic.
// ctx       - kernel context; the optional axes input (input index 3) is read by the callee.
// data_rank - rank of the data tensor being padded.
// pads_data - raw pads values from the pads input tensor.
// pads      - output: resulting per-dimension pads (presumably 2 * data_rank entries — confirm in the CPU implementation).
void PadBase::ComputePads(OpKernelContext& ctx, size_t data_rank, gsl::span<const int64_t> pads_data,
PadsVector& pads) {
g_host_cpu.PadBase__ComputePads(ctx, data_rank, pads_data, pads);
}
Expand Down

0 comments on commit fb5245d

Please sign in to comment.