Skip to content

Commit

Permalink
Adding more unit tests
Browse files Browse the repository at this point in the history
  • Loading branch information
gedoensmax committed Aug 29, 2023
1 parent 93759d6 commit abe101d
Show file tree
Hide file tree
Showing 6 changed files with 298 additions and 60 deletions.
1 change: 1 addition & 0 deletions cmake/onnxruntime_unittests.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -364,6 +364,7 @@ list(APPEND onnxruntime_test_providers_src ${onnxruntime_test_providers_cpu_src}
# Collect CUDA EP provider tests. The nhwc/ subdirectory must be listed
# explicitly: file(GLOB) is non-recursive, so "providers/cuda/*" alone does
# not pick up sources in subfolders. Skipped for minimal/reduced-ops builds,
# which strip the kernels these tests exercise.
if (onnxruntime_USE_CUDA AND NOT onnxruntime_MINIMAL_BUILD AND NOT onnxruntime_REDUCED_OPS_BUILD)
file(GLOB onnxruntime_test_providers_cuda_src CONFIGURE_DEPENDS
"${TEST_SRC_DIR}/providers/cuda/*"
"${TEST_SRC_DIR}/providers/cuda/nhwc/*.cc"
)
list(APPEND onnxruntime_test_providers_src ${onnxruntime_test_providers_cuda_src})
endif()
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/core/providers/cuda/nn/batch_norm.cc
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ Status BatchNorm<T, NHWC>::ComputeInternal(OpKernelContext* p_op_kernel_context)
CudnnTensor data_desc;
vector<int64_t> new_dims;
BatchNormHelper::NormalizeDims(x_shape, new_dims);
ORT_RETURN_IF_ERROR(data_desc.Set(new_dims, CudnnTensor::GetDataType<CudaT>()));
ORT_RETURN_IF_ERROR(data_desc.Set(new_dims, CudnnTensor::GetDataType<CudaT>(), NHWC));

// For half data type, the alpha, beta, scale, B, mean, var need to be float type
if (X->IsDataType<MLFloat16>()) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,26 +2,7 @@
// Copyright (c) 2023 NVIDIA Corporation.
// Licensed under the MIT License.

#include "core/providers/cuda/cuda_provider_options.h"
#include "core/providers/common.h"

#include "test/providers/compare_provider_test_utils.h"
#include "test/common/cuda_op_test_utils.h"

#include "gtest/gtest.h"

#define MAKE_PROVIDERS() \
std::vector<std::shared_ptr<IExecutionProvider>> execution_providers; \
OrtCUDAProviderOptionsV2 nhwc = { \
.prefer_nhwc = true}; \
execution_providers.push_back(CudaExecutionProviderWithOptions(&nhwc)); \
\
double error_tolerance = 1e-3; \
OrtCUDAProviderOptionsV2 nchw = { \
.prefer_nhwc = false}; \
auto source_ep = CudaExecutionProviderWithOptions(&nchw); \
auto test = op.get_test(); \
test->CompareEPs(std::move(source_ep), execution_providers, error_tolerance);
#include "nhwc_cuda_helper.h"

namespace onnxruntime {
namespace test {
Expand All @@ -38,21 +19,19 @@ struct ConvOp {
std::vector<int64_t> dilations = {1, 1};

std::unique_ptr<CompareOpTester> get_test() {
// create rand inputs
RandomValueGenerator random{};

auto test = std::make_unique<CompareOpTester>("Conv", 7);
std::vector<T> input_data = random.Gaussian<T>(input_dims, 0.0f, 0.3f);
// GTEST_ASSERT_EQ(kernel_shape.size(), 2);
std::vector<T> input_data = random.Uniform<T>(input_dims, 0.0f, 0.3f);

std::vector<int64_t> weight_dims{channels, input_dims[1] / group, kernel_shape[0], kernel_shape[1]};
std::vector<T> weight_data = random.Gaussian<T>(weight_dims, 0.0f, 0.3f);
std::vector<T> weight_data = random.Uniform<T>(weight_dims, 0.0f, 0.3f);

test->AddInput<T>("X", input_dims, input_data);
test->AddInput<T>("W", weight_dims, weight_data, true);
if (bias) {
std::vector<int64_t> bias_dims{channels};
std::vector<T> bias_data = random.Gaussian<T>(bias_dims, 0.0f, 0.3f);
std::vector<T> bias_data = random.Uniform<T>(bias_dims, 0.0f, 0.3f);
test->AddInput<T>("B", bias_dims, bias_data, true);
}
test->AddAttribute("group", group);
Expand Down Expand Up @@ -86,21 +65,19 @@ struct ConvTransposeOp {
std::vector<int64_t> dilations = {1, 1};

std::unique_ptr<CompareOpTester> get_test() {
// create rand inputs
RandomValueGenerator random{};

auto test = std::make_unique<CompareOpTester>("ConvTranspose", 14);
std::vector<T> input_data = random.Gaussian<T>(input_dims, 0.0f, 0.3f);
// GTEST_ASSERT_EQ(kernel_shape.size(), 2);
std::vector<T> input_data = random.Uniform<T>(input_dims, 0.0f, 0.3f);

std::vector<int64_t> weight_dims{input_dims[1], channels / group, kernel_shape[0], kernel_shape[1]};
std::vector<T> weight_data = random.Gaussian<T>(weight_dims, 0.0f, 0.3f);
std::vector<T> weight_data = random.Uniform<T>(weight_dims, 0.2f, 0.5f);

test->AddInput<T>("X", input_dims, input_data);
test->AddInput<T>("W", weight_dims, weight_data, true);
if (bias) {
std::vector<int64_t> bias_dims{channels};
std::vector<T> bias_data = random.Gaussian<T>(bias_dims, 0.0f, 0.3f);
std::vector<T> bias_data = random.Uniform<T>(bias_dims, 0.0f, 0.4f);
test->AddInput<T>("B", bias_dims, bias_data, true);
}
test->AddAttribute("group", group);
Expand All @@ -125,13 +102,24 @@ struct ConvTransposeOp {
};

TEST(CudaNhwcTest, ConvNhwcBias) {
auto op = ConvOp<float>{
.input_dims = {1, 16, 64, 64},
.kernel_shape = {3, 3},
.channels = 16,
.bias = true};

MAKE_PROVIDERS()
{
auto op = ConvOp<float>{
.input_dims = {1, 16, 64, 64},
.kernel_shape = {3, 3},
.channels = 16,
.bias = true};

MAKE_PROVIDERS()
}
{
auto op = ConvOp<MLFloat16>{
.input_dims = {1, 16, 64, 64},
.kernel_shape = {3, 3},
.channels = 16,
.bias = true};

MAKE_PROVIDERS_EPS(1e-2)
}
}

TEST(CudaNhwcTest, ConvNhwcGroupNoBias) {
Expand All @@ -155,34 +143,68 @@ TEST(CudaNhwcTest, ConvNhwcPadding) {
}

TEST(CudaNhwcTest, ConvTransposeNhwcGroupNoBias) {
auto op = ConvTransposeOp<float>{
.input_dims = {8, 8, 32, 32},
.kernel_shape = {3, 3},
.channels = 16,
.group = 4};

MAKE_PROVIDERS()
{
auto op = ConvTransposeOp<float>{
.input_dims = {8, 8, 32, 32},
.kernel_shape = {3, 3},
.channels = 16,
.group = 4};

MAKE_PROVIDERS()
}
{
auto op = ConvTransposeOp<MLFloat16>{
.input_dims = {8, 8, 32, 32},
.kernel_shape = {3, 3},
.channels = 16,
.group = 4};

MAKE_PROVIDERS()
}
}

TEST(CudaNhwcTest, ConvTransposeNhwcBias) {
auto op = ConvTransposeOp<float>{
.input_dims = {1, 8, 80, 80},
.kernel_shape = {5, 5},
.channels = 16,
.bias = true};

MAKE_PROVIDERS()
{
auto op = ConvTransposeOp<float>{
.input_dims = {1, 8, 80, 80},
.kernel_shape = {5, 5},
.channels = 16,
.bias = true};

MAKE_PROVIDERS()
}
{
auto op = ConvTransposeOp<MLFloat16>{
.input_dims = {1, 8, 80, 80},
.kernel_shape = {5, 5},
.channels = 16,
.bias = true};

MAKE_PROVIDERS()
}
}

TEST(CudaNhwcTest, ConvTransposeNhwcPad) {
auto op = ConvTransposeOp<float>{
.input_dims = {1, 16, 8, 8},
.kernel_shape = {3, 3},
.channels = 32,
.padding = {2, 2, 2, 2},
.output_padding = {}};

MAKE_PROVIDERS()
{
auto op = ConvTransposeOp<float>{
.input_dims = {1, 16, 8, 8},
.kernel_shape = {3, 3},
.channels = 32,
.padding = {2, 2, 2, 2},
.output_padding = {}};

MAKE_PROVIDERS()
}
{
auto op = ConvTransposeOp<MLFloat16>{
.input_dims = {1, 16, 8, 8},
.kernel_shape = {3, 3},
.channels = 32,
.padding = {2, 2, 2, 2},
.output_padding = {}};

MAKE_PROVIDERS()
}
}

TEST(CudaNhwcTest, ConvTransposeNhwcOutPad) {
Expand Down
26 changes: 26 additions & 0 deletions onnxruntime/test/providers/cuda/nhwc/nhwc_cuda_helper.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Copyright (c) 2023 NVIDIA Corporation.
// Licensed under the MIT License.

#include "core/providers/cuda/cuda_provider_options.h"
#include "core/providers/common.h"

#include "test/providers/compare_provider_test_utils.h"
#include "test/common/cuda_op_test_utils.h"

#include "gtest/gtest.h"

// Runs the test built by the surrounding scope's `op.get_test()` twice:
// once on a CUDA EP with prefer_nhwc=true (EP under test) and once with
// prefer_nhwc=false (reference), then compares outputs within `eps`.
// NOTE: expands to declarations (execution_providers, nhwc, nchw, ...) —
// invoke at most once per enclosing scope to avoid redefinition errors.
#define MAKE_PROVIDERS_EPS(eps) \
std::vector<std::shared_ptr<IExecutionProvider>> execution_providers; \
OrtCUDAProviderOptionsV2 nhwc = { \
.prefer_nhwc = true}; \
execution_providers.push_back(CudaExecutionProviderWithOptions(&nhwc)); \
\
double error_tolerance = eps; \
OrtCUDAProviderOptionsV2 nchw = { \
.prefer_nhwc = false}; \
auto source_ep = CudaExecutionProviderWithOptions(&nchw); \
auto test = op.get_test(); \
test->CompareEPs(std::move(source_ep), execution_providers, error_tolerance);

// Convenience wrapper with the default float32 tolerance.
#define MAKE_PROVIDERS() MAKE_PROVIDERS_EPS(1e-3)
68 changes: 68 additions & 0 deletions onnxruntime/test/providers/cuda/nhwc/norm_test.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Copyright (c) 2023 NVIDIA Corporation.
// Licensed under the MIT License.

#include "nhwc_cuda_helper.h"

namespace onnxruntime {
namespace test {

// Test-input builder for BatchNormalization (opset 14, inference mode).
// Produces a CompareOpTester with random inputs; the caller attaches the
// execution providers to compare (see MAKE_PROVIDERS* in nhwc_cuda_helper.h).
template <typename T>
struct BatchNormOp {
  // NCHW input shape; input_dims[1] is the channel count C that sizes the
  // per-channel scale/B/input_mean/input_var tensors.
  const std::vector<int64_t> input_dims;

  std::unique_ptr<CompareOpTester> get_test() {
    // create rand inputs
    RandomValueGenerator random{};

    auto test = std::make_unique<CompareOpTester>("BatchNormalization", 14);
    std::vector<T> input_data = random.Uniform<T>(input_dims, 0.0f, 0.3f);
    auto channels = input_dims[1];
    test->AddInput<T>("X", input_dims, input_data);

    std::vector<int64_t> bias_dims{channels};
    std::vector<T> bias_data = random.Uniform<T>(bias_dims, 0.2f, 1.0f);
    test->AddInput<T>("B", bias_dims, bias_data);
    // Deliberately reuse the bias data for the scale input.
    test->AddInput<T>("scale", bias_dims, bias_data);

    // Fix: mean_data/var_data were previously generated but never passed —
    // bias_data was fed to "input_mean" and "input_var" instead, so the test
    // never exercised a nontrivial mean/variance. Feed the generated tensors.
    std::vector<T> mean_data = random.Uniform<T>(bias_dims, 0.7f, 0.8f);
    test->AddInput<T>("input_mean", bias_dims, mean_data);
    // Variance is sampled non-negative; the op's epsilon keeps the
    // normalization denominator away from zero.
    std::vector<T> var_data = random.Uniform<T>(bias_dims, 0.0f, 0.1f);
    test->AddInput<T>("input_var", bias_dims, var_data);

    // Output buffer is zero-filled; CompareEPs overwrites and compares it.
    std::vector<T> output_data = FillZeros<T>(input_dims);
    test->AddOutput<T>("Y", input_dims, output_data);
    return test;
  }
};

// Compares BatchNormalization between the NHWC-preferring CUDA EP and the
// default NCHW CUDA EP on identical random inputs (via MAKE_PROVIDERS*).
// Each case sits in its own scope because MAKE_PROVIDERS expands to
// declarations and cannot appear twice in one block.
TEST(CudaNhwcTest, BatchNormNhwcBias) {
{
// float32: default tolerance (1e-3).
auto op = BatchNormOp<float>{
.input_dims = {4, 16, 64, 64},
};

MAKE_PROVIDERS()
}
{
// fp16: looser tolerance to absorb half-precision rounding.
auto op = BatchNormOp<MLFloat16>{
.input_dims = {4, 16, 64, 64},
};

MAKE_PROVIDERS_EPS(1e-2)
}
}

// TEST(CudaNhwcTest, InstanceNormNhwc) {
// auto op = InstanceNormOp<MLFloat16>{
// .input_dims = {4, 16, 64, 64},
// };

// MAKE_PROVIDERS()
// }

} // namespace test
} // namespace onnxruntime
Loading

0 comments on commit abe101d

Please sign in to comment.