Commit

Docs lint
yuslepukhin committed Feb 23, 2024
1 parent 976f6a9, commit 250f54f
Showing 4 changed files with 24 additions and 15 deletions.
docs/OperatorKernels.md (3 changes: 2 additions & 1 deletion)
@@ -734,7 +734,8 @@ Do not modify directly.*
|||13|**T** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)<br/> **shape** = tensor(int64)|
|||[5, 12]|**T** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)<br/> **shape** = tensor(int64)|
|||[1, 4]|**T** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
- |Resize|*in* X:**T**<br> *in* scales:**tensor(float)**<br> *out* Y:**T**<br><br>or<br><br>*in* X:**T1**<br> *in* roi:**T2**<br> *in* scales:**tensor(float)**<br> *in* sizes:**tensor(int64)**<br> *out* Y:**T1**|13+|**T1** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(uint8)|
+ |Resize|*in* X:**T**<br> *in* scales:**tensor(float)**<br> *out* Y:**T**<br><br>or<br><br>*in* X:**T1**<br> *in* roi:**T2**<br> *in* scales:**tensor(float)**<br> *in* sizes:**tensor(int64)**<br> *out* Y:**T1**|18+|**T1** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(uint8)|
+ |||[13, 17]|**T1** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(uint8)|
|||[11, 12]|**T1** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(uint8)|
|||10|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(uint8)|
|ReverseSequence|*in* input:**T**<br> *in* sequence_lens:**tensor(int64)**<br> *out* Y:**T**|10+|**T** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
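The Resize rows above come from the auto-generated CUDA execution provider table: the open-ended 13+ entry is replaced by an 18+ entry plus a closed [13, 17] entry, reflecting that ONNX opset 18 extended Resize (notably with the antialias attribute exercised by the tests further down) and the CUDA EP now treats the two opset ranges separately. A minimal sketch, assuming the public ONNX Runtime C++ API, of creating a session that dispatches Resize nodes to these CUDA kernels; the model path is a placeholder, and on Windows the session constructor expects a wide-character path instead:

```cpp
#include <onnxruntime_cxx_api.h>

int main() {
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "resize-cuda-demo");
  Ort::SessionOptions session_options;

  // Default-constructed CUDA provider options target GPU device 0.
  OrtCUDAProviderOptions cuda_options{};
  session_options.AppendExecutionProvider_CUDA(cuda_options);

  // Placeholder model: any graph whose Resize nodes fall in opset [13, 17] or 18+
  // resolves to the kernel registrations summarized in the table rows above.
  Ort::Session session(env, "model_with_resize.onnx", session_options);
  return 0;
}
```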
onnxruntime/core/providers/cuda/tensor/resize_impl.cu (20 changes: 12 additions & 8 deletions)
@@ -268,8 +268,9 @@ __global__ void _ResizeBilinearCoordinateMapping(
static_cast<float>(output_height),
static_cast<float>(input_height),
roi_height_start, roi_height_end);
- dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (input_y < 0 ||
- input_y > static_cast<float>(input_height - 1)));
+ dims_mapping[id].extrapolate_ = static_cast<int>((extrapolation_enabled &&
+ (input_y < 0 ||
+ input_y > static_cast<float>(input_height - 1))));
input_y = max(0.0f, min(input_y, static_cast<float>(input_height - 1)));
int y_int = static_cast<int>(input_y);
dims_mapping[id].origin_ = y_int;
@@ -280,8 +281,9 @@ __global__ void _ResizeBilinearCoordinateMapping(
scale_width, static_cast<float>(output_width),
static_cast<float>(input_width), roi_width_start,
roi_width_end);
- dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (input_x < 0 ||
- input_x > static_cast<float>(input_width - 1)));
+ dims_mapping[id].extrapolate_ = static_cast<int>((extrapolation_enabled &&
+ (input_x < 0 ||
+ input_x > static_cast<float>(input_width - 1))));
input_x = max(0.0f, min(input_x, static_cast<float>(input_width - 1)));
int x_int = static_cast<int>(input_x);
dims_mapping[id].origin_ = x_int;
@@ -350,8 +352,9 @@ __global__ void _ResizeTrilinearCoordinateMapping(
static_cast<float>(output_depth),
static_cast<float>(input_depth),
roi_depth_start, roi_depth_end);
- dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (input_z < 0 ||
- input_z > static_cast<float>(input_depth - 1)));
+ dims_mapping[id].extrapolate_ = static_cast<int>((extrapolation_enabled &&
+ (input_z < 0 ||
+ input_z > static_cast<float>(input_depth - 1))));
input_z = max(0.0f, min(input_z, static_cast<float>(input_depth - 1)));
int z_int = static_cast<int>(input_z);
dims_mapping[id].origin_ = z_int;
@@ -363,8 +366,9 @@ __global__ void _ResizeTrilinearCoordinateMapping(
static_cast<float>(input_height),
roi_height_start, roi_height_end);

- dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (input_y < 0 ||
- input_y > static_cast<float>(input_height - 1)));
+ dims_mapping[id].extrapolate_ = static_cast<int>((extrapolation_enabled &&
+ (input_y < 0 ||
+ input_y > static_cast<float>(input_height - 1))));
input_y = max(0.0f, min(input_y, static_cast<float>(input_height - 1)));
int y_int = static_cast<int>(input_y);
dims_mapping[id].origin_ = y_int;
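Each hunk above makes the same mechanical change: the boolean extrapolation test was narrowed to int with a C-style (int) cast and is now written with static_cast<int>, presumably to satisfy the C++ linter and to make the bool-to-int conversion explicit. A small host-side sketch of the pattern, with illustrative names rather than the actual kernel code:

```cpp
#include <algorithm>
#include <cstdio>

struct DimMapping {
  int extrapolate_;
  int origin_;
};

// Mirrors the coordinate-mapping step above: record whether the mapped
// coordinate falls outside the input range, then clamp it and keep the
// integer part as the sampling origin.
void MapCoordinate(DimMapping& m, float input_y, int input_height,
                   bool extrapolation_enabled) {
  m.extrapolate_ = static_cast<int>(extrapolation_enabled &&
                                    (input_y < 0.0f ||
                                     input_y > static_cast<float>(input_height - 1)));
  input_y = std::max(0.0f, std::min(input_y, static_cast<float>(input_height - 1)));
  m.origin_ = static_cast<int>(input_y);
}

int main() {
  DimMapping m{};
  MapCoordinate(m, 12.5f, 10, /*extrapolation_enabled=*/true);
  std::printf("extrapolate=%d origin=%d\n", m.extrapolate_, m.origin_);  // extrapolate=1 origin=9
  return 0;
}
```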
onnxruntime/core/providers/cuda/tensor/resize_impl.h (4 changes: 4 additions & 0 deletions)
@@ -2,7 +2,11 @@
// Licensed under the MIT License.

#pragma once

#include <stdint.h>

#include <tuple>

#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/common/common.h"
#include "core/providers/cpu/tensor/upsamplebase.h"
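The header now pulls in <stdint.h> and <tuple> directly, in line with include-what-you-use: if declarations in resize_impl.h name std::tuple or fixed-width integer types, they should not rely on those arriving transitively through cuda_utils.h or other includes. A minimal sketch of the idea; the declaration below is illustrative, not one taken from resize_impl.h:

```cpp
#pragma once

#include <stdint.h>

#include <tuple>

// Because the declaration names std::tuple and int64_t, this header must
// include <tuple> and <stdint.h> itself rather than depend on transitive includes.
std::tuple<int64_t, int64_t> ComputeResizedExtent(int64_t input_extent, float scale);
```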
onnxruntime/test/providers/cpu/tensor/resize_op_test.cc (12 changes: 6 additions & 6 deletions)
@@ -2008,12 +2008,12 @@ TEST(ResizeOpTest, Antialias_NhwcBilinear) {
36.590908f, 76.59091f, 116.59091f};

// Nchw is not supported by CUDA Resize implementation
- InlinedVector<std::string_view> excluded_eps = {kCudaExecutionProvider};
+ InlinedVector<std::string_view> excluded_eps = {kCudaExecutionProvider, kRocmExecutionProvider};
TestAntialiasing({{"mode", "linear"}, {"exclude_outside", "1"}}, {1, 5, 8, 3}, X, {1, 4, 5, 3}, Y, excluded_eps);
}

TEST(ResizeOpTest, Antialias_NhwcBilinear_dtype) {
- InlinedVector<std::string_view> excluded_eps = {kCudaExecutionProvider};
+ InlinedVector<std::string_view> excluded_eps = {kCudaExecutionProvider, kRocmExecutionProvider};
{
std::vector<uint8_t> X(16);
std::iota(X.begin(), X.end(), uint8_t(0));
@@ -2160,7 +2160,7 @@ TEST(ResizeOpTest, Antialias_NHWCBicubic_ExcludeOutside) {
46.606194f, 19.878183f, 43.87818f, 21.358122f, 45.35812f,
22.907503f, 46.907505f, 24.387442f, 48.387444f};

- InlinedVector<std::string_view> excluded_eps = {kCudaExecutionProvider};
+ InlinedVector<std::string_view> excluded_eps = {kCudaExecutionProvider, kRocmExecutionProvider};
TestAntialiasing({{"mode", "cubic"}, {"exclude_outside", "0"}}, {1, 4, 6, 2}, X, {1, 8, 4, 2}, Y, excluded_eps);
}

@@ -2183,7 +2183,7 @@ TEST(ResizeOpTest, Antialias_Linear_AlignCorners) {
187.08333f, 195.91667f, 198.41667f, 205.91667f, 208.41667f,
217.25f, 219.75f, 227.25f, 229.75f, 238.58333f,
241.08333f, 248.58333f, 251.08333f};
- InlinedVector<std::string_view> excluded_eps = {kCudaExecutionProvider};
+ InlinedVector<std::string_view> excluded_eps = {kCudaExecutionProvider, kRocmExecutionProvider};
TestAntialiasing(
{{"mode", "linear"}, {"exclude_outside", "0"}, {"coordinate_transformation_mode", "align_corners"}},
{4, 1, 4, 4, 4}, X, {4, 1, 3, 2, 2}, Y, excluded_eps);
@@ -2288,7 +2288,7 @@ TEST(ResizeOpTest, Antialias_Axes_and_PolicyNoLarger) {
50.7f, 51.9f, 54.3f, 55.5f, 56.7f};
// clang-format off
TestAntialiasing(
{{"mode", "linear"}, {"exclude_outside", "1"}, {"axes", "{2,3,4}"}, {"output_shape", "{1,1,3,3,3}"},
{{"mode", "linear"}, {"exclude_outside", "1"}, {"axes", "{2,3,4}"}, {"output_shape", "{1,1,3,3,3}"},
{"policy", "not_larger"}},
{1, 1, 4, 4, 4}, X,
{3, 4, 5}, Y);
@@ -2303,7 +2303,7 @@ TEST(ResizeOpTest, Antialias_Axes_and_PolicyNoSmaller) {
50.7f, 51.9f, 54.3f, 55.5f, 56.7f};
// clang-format off
TestAntialiasing(
{{"mode", "linear"}, {"exclude_outside", "1"}, {"axes", "{2,3,4}"}, {"output_shape", "{1,1,3,3,3}"},
{{"mode", "linear"}, {"exclude_outside", "1"}, {"axes", "{2,3,4}"}, {"output_shape", "{1,1,3,3,3}"},
{"policy", "not_smaller"}},
{1, 1, 4, 4, 4}, X,
{1, 2, 3}, Y);
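Every updated test adds kRocmExecutionProvider next to kCudaExecutionProvider in excluded_eps, presumably because the ROCm provider shares the CUDA Resize implementation and therefore the limitation called out in the comment above. A minimal sketch of the exclusion pattern, not the repository's TestAntialiasing helper; the provider-name constants are stand-ins for the ones used in the tests:

```cpp
#include <algorithm>
#include <string_view>
#include <vector>

constexpr std::string_view kCpuExecutionProvider = "CPUExecutionProvider";
constexpr std::string_view kCudaExecutionProvider = "CUDAExecutionProvider";
constexpr std::string_view kRocmExecutionProvider = "ROCMExecutionProvider";

// Drop any provider a test has listed in excluded_eps before running it.
std::vector<std::string_view> FilterExcluded(
    std::vector<std::string_view> candidates,
    const std::vector<std::string_view>& excluded_eps) {
  candidates.erase(
      std::remove_if(candidates.begin(), candidates.end(),
                     [&](std::string_view ep) {
                       return std::find(excluded_eps.begin(), excluded_eps.end(), ep) !=
                              excluded_eps.end();
                     }),
      candidates.end());
  return candidates;
}

// FilterExcluded({kCpuExecutionProvider, kCudaExecutionProvider, kRocmExecutionProvider},
//                {kCudaExecutionProvider, kRocmExecutionProvider})
// leaves only the CPU provider, so only that EP exercises the antialias tests above.
```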
