From 25920cb0883874c0bf07b826a5d50f07d0f1fa4b Mon Sep 17 00:00:00 2001
From: TP Boudreau
Date: Tue, 19 Mar 2024 23:36:15 +0000
Subject: [PATCH] Fix lint errors

---
 .../cpu/math/element_wise_ops_test.cc         | 88 +++++++++----------
 1 file changed, 42 insertions(+), 46 deletions(-)

diff --git a/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc b/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
index c5aac72f9ac78..c26d8a5231b33 100644
--- a/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
+++ b/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
@@ -1546,19 +1546,19 @@ TEST(MathOpTest, Min_12_Double) {
 TEST(MathOpTest, Min_12_Double_Nan) {
   OpTester test("Min", 12);
   test.AddInput<double>("data_2", {3, 3},
-                         {std::numeric_limits<double>::quiet_NaN(),
-                          std::numeric_limits<double>::quiet_NaN(),
-                          std::numeric_limits<double>::quiet_NaN(),
-                          -0.5, 0.0, -2.0,
-                          0.5, 0.0, 2.0});
-  test.AddInput<double>("data_1", {3, 1},
-                        {0.0, -1.0, 1.0});
-  test.AddOutput<double>("min", {3, 3},
                         {std::numeric_limits<double>::quiet_NaN(),
                          std::numeric_limits<double>::quiet_NaN(),
                          std::numeric_limits<double>::quiet_NaN(),
-                         -1.0, -1.0, -2.0,
-                         0.5, 0.0, 1.0});
+                         -0.5, 0.0, -2.0,
+                         0.5, 0.0, 2.0});
+  test.AddInput<double>("data_1", {3, 1},
+                        {0.0, -1.0, 1.0});
+  test.AddOutput<double>("min", {3, 3},
+                         {std::numeric_limits<double>::quiet_NaN(),
+                          std::numeric_limits<double>::quiet_NaN(),
+                          std::numeric_limits<double>::quiet_NaN(),
+                          -1.0, -1.0, -2.0,
+                          0.5, 0.0, 1.0});
   test.Run(OpTester::ExpectResult::kExpectSuccess, "",
            {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});  // TensorRT: Input batch size is inconsistent
 }
@@ -1672,21 +1672,19 @@ TEST(MathOpTest, Min_12_MLFLoat16_Scalar1) {
 TEST(MathOpTest, Min_12_MLFloat16_Nan) {
   OpTester test("Min", 12);
   test.AddInput<MLFloat16>("data_0", {3, 3},
-                           MakeMLFloat16({
-                               std::numeric_limits<float>::quiet_NaN(),
-                               std::numeric_limits<float>::quiet_NaN(),
-                               std::numeric_limits<float>::quiet_NaN(),
-                               -0.5f, 0.0f, -2.0f,
-                               0.5f, 0.0f, 2.0f}));
+                           MakeMLFloat16({std::numeric_limits<float>::quiet_NaN(),
+                                          std::numeric_limits<float>::quiet_NaN(),
+                                          std::numeric_limits<float>::quiet_NaN(),
+                                          -0.5f, 0.0f, -2.0f,
+                                          0.5f, 0.0f, 2.0f}));
   test.AddInput<MLFloat16>("data_1", {3, 1},
-                               MakeMLFloat16({0.0f, -1.0f, 1.0f}));
+                           MakeMLFloat16({0.0f, -1.0f, 1.0f}));
   test.AddOutput<MLFloat16>("min", {3, 3},
-                            MakeMLFloat16({
-                                std::numeric_limits<float>::quiet_NaN(),
-                                std::numeric_limits<float>::quiet_NaN(),
-                                std::numeric_limits<float>::quiet_NaN(),
-                                -1.0f, -1.0f, -2.0f,
-                                0.5f, 0.0f, 1.0f}));
+                            MakeMLFloat16({std::numeric_limits<float>::quiet_NaN(),
+                                           std::numeric_limits<float>::quiet_NaN(),
+                                           std::numeric_limits<float>::quiet_NaN(),
+                                           -1.0f, -1.0f, -2.0f,
+                                           0.5f, 0.0f, 1.0f}));
   test.Run(OpTester::ExpectResult::kExpectSuccess, "",
            {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});  // TensorRT: Input batch size is inconsistent
 }
@@ -1817,19 +1815,19 @@ TEST(MathOpTest, Max_12_Double) {
 TEST(MathOpTest, Max_12_Double_Nan) {
   OpTester test("Max", 12);
   test.AddInput<double>("data_2", {3, 3},
-                         {std::numeric_limits<double>::quiet_NaN(),
-                          std::numeric_limits<double>::quiet_NaN(),
-                          std::numeric_limits<double>::quiet_NaN(),
-                          -0.5, 0.0, -2.0,
-                          0.5, 0.0, 2.0});
-  test.AddInput<double>("data_1", {3, 1},
-                        {0.0, -1.0, 1.0});
-  test.AddOutput<double>("max", {3, 3},
                         {std::numeric_limits<double>::quiet_NaN(),
                          std::numeric_limits<double>::quiet_NaN(),
                          std::numeric_limits<double>::quiet_NaN(),
-                         -0.5, 0.0, -1.0,
-                         1.0, 1.0, 2.0});
+                         -0.5, 0.0, -2.0,
+                         0.5, 0.0, 2.0});
+  test.AddInput<double>("data_1", {3, 1},
+                        {0.0, -1.0, 1.0});
+  test.AddOutput<double>("max", {3, 3},
+                         {std::numeric_limits<double>::quiet_NaN(),
+                          std::numeric_limits<double>::quiet_NaN(),
+                          std::numeric_limits<double>::quiet_NaN(),
+                          -0.5, 0.0, -1.0,
+                          1.0, 1.0, 2.0});
   test.Run(OpTester::ExpectResult::kExpectSuccess, "",
            {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});  // TensorRT: Input batch size is inconsistent
 }
@@ -1943,21 +1941,19 @@ TEST(MathOpTest, Max_12_MLFLoat16_Scalar1) {
 TEST(MathOpTest, Max_12_MLFloat16_Nan) {
   OpTester test("Max", 12);
   test.AddInput<MLFloat16>("data_0", {3, 3},
-                           MakeMLFloat16({
-                               std::numeric_limits<float>::quiet_NaN(),
-                               std::numeric_limits<float>::quiet_NaN(),
-                               std::numeric_limits<float>::quiet_NaN(),
-                               -0.5f, 0.0f, -2.0f,
-                               0.5f, 0.0f, 2.0f}));
+                           MakeMLFloat16({std::numeric_limits<float>::quiet_NaN(),
+                                          std::numeric_limits<float>::quiet_NaN(),
+                                          std::numeric_limits<float>::quiet_NaN(),
+                                          -0.5f, 0.0f, -2.0f,
+                                          0.5f, 0.0f, 2.0f}));
   test.AddInput<MLFloat16>("data_1", {3, 1},
-                               MakeMLFloat16({0.0f, -1.0f, 1.0f}));
+                           MakeMLFloat16({0.0f, -1.0f, 1.0f}));
   test.AddOutput<MLFloat16>("max", {3, 3},
-                            MakeMLFloat16({
-                                std::numeric_limits<float>::quiet_NaN(),
-                                std::numeric_limits<float>::quiet_NaN(),
-                                std::numeric_limits<float>::quiet_NaN(),
-                                -0.5f, 0.0f, -1.0f,
-                                1.0f, 1.0f, 2.0f}));
+                            MakeMLFloat16({std::numeric_limits<float>::quiet_NaN(),
+                                           std::numeric_limits<float>::quiet_NaN(),
+                                           std::numeric_limits<float>::quiet_NaN(),
+                                           -0.5f, 0.0f, -1.0f,
+                                           1.0f, 1.0f, 2.0f}));
   test.Run(OpTester::ExpectResult::kExpectSuccess, "",
            {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});  // TensorRT: Input batch size is inconsistent
 }