From b9e5f1544f805db21bedcaa4fa7c0ab465a26fc8 Mon Sep 17 00:00:00 2001
From: Anagha Rao
Date: Tue, 30 Jan 2024 12:08:38 -0800
Subject: [PATCH] Lintrunner

---
 .../matmul_integer_to_float_test.cc           | 39 ++++++++-----------
 .../test/testdata/matmul_integer_to_float.py  |  2 +-
 2 files changed, 17 insertions(+), 24 deletions(-)

diff --git a/onnxruntime/test/contrib_ops/matmul_integer_to_float_test.cc b/onnxruntime/test/contrib_ops/matmul_integer_to_float_test.cc
index 49560b8ff268a..b2e17c5333319 100644
--- a/onnxruntime/test/contrib_ops/matmul_integer_to_float_test.cc
+++ b/onnxruntime/test/contrib_ops/matmul_integer_to_float_test.cc
@@ -24,9 +24,7 @@ namespace onnxruntime {
 namespace test {
 
 template <typename IType, typename WType>
-static void CalculateMatMulIntegerToFloat(const int64_t M, const int64_t N, const int64_t K, const std::vector<IType>& A_data, const std::vector<float>& A_scale, const std::vector<IType>& A_zero_point, const std::vector<WType>& B_data, std::vector<float>& B_scale, std::vector<WType>& B_zero_point, const
-                                          std::vector<float>& Bias, std::vector<float>& Y_data, bool per_column, bool has_zp, bool has_bias) {
-
+static void CalculateMatMulIntegerToFloat(const int64_t M, const int64_t N, const int64_t K, const std::vector<IType>& A_data, const std::vector<float>& A_scale, const std::vector<IType>& A_zero_point, const std::vector<WType>& B_data, std::vector<float>& B_scale, std::vector<WType>& B_zero_point, const std::vector<float>& Bias, std::vector<float>& Y_data, bool per_column, bool has_zp, bool has_bias) {
   if (!per_column) {
     B_zero_point.resize(N, B_zero_point[0]);
     B_scale.resize(N, B_scale[0]);
@@ -77,7 +75,7 @@ void TestMatMulIntegerToFloat(bool is_matrix_b_constant,
                                         std::numeric_limits<WType>::max());
   std::transform(tmp_B_data.begin(), tmp_B_data.end(), std::back_inserter(B_data), [](int32_t v) -> WType {
-        return static_cast<WType>(v);
+    return static_cast<WType>(v);
   });
 
   std::vector<float> A_scale = random.Uniform<float>(AsSpan<int64_t>({1}), -0.1f, 0.1f);
@@ -120,22 +118,21 @@ void TestMatMulIntegerToFloat(bool is_matrix_b_constant,
   std::vector<float> Y_data(M * N);
   CalculateMatMulIntegerToFloat(M, N, K, A_data, A_scale, A_zero_point, B_data,
                                 B_scale, B_zero_point, Bias, Y_data, per_column, has_zp, has_bias);
 
-  if ( constexpr(std::is_same_v<OType, float>)) {
-    test.AddOutput("Y", {M, N}, Y_data);
-  } else {
-    test.AddOutput("Y", {M, N}, ToFloat16(Y_data));
-    test.SetOutputAbsErr("Y", 0.5f);
-  }
+  if constexpr (std::is_same_v<OType, float>) {
+    test.AddOutput("Y", {M, N}, Y_data);
+  } else {
+    test.AddOutput("Y", {M, N}, ToFloat16(Y_data));
+    test.SetOutputAbsErr("Y", 0.5f);
+  }
 
   // Only DML EP supports these data type combinations for now
-  if ((constexpr(std::is_same_v<OType, MLFloat16>)) || (constexpr(std::is_same_v<IType, int8_t>) &&
-      /*(constexpr(std::is_same_v<OType, float>) &&*/ !constexpr(std::is_same_v<WType, int8_t>))
-  ) {
+  if constexpr (std::is_same_v<OType, MLFloat16> ||
+                (std::is_same_v<IType, int8_t> && !std::is_same_v<WType, int8_t>)) {
     std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
     execution_providers.push_back(DefaultDmlExecutionProvider());
     test.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers);
-    } else {
+  } else {
     test.Run();
   }
 }
@@ -191,7 +188,6 @@ TEST(MatMulIntegerToFloat, HasZeroPoint_NoBias_test_U8U8) {
   RunMatMulIntegerToFloatTest<uint8_t, uint8_t, float, true, false>();
 }
 
-
 TEST(MatMulIntegerToFloat, NoZeroPoint_HasBias_test_U8U8) {
   RunMatMulIntegerToFloatTest<uint8_t, uint8_t, float, false, true>();
 }
@@ -200,7 +196,6 @@ TEST(MatMulIntegerToFloat, NoZeroPoint_NoBias_test_U8U8) {
   RunMatMulIntegerToFloatTest<uint8_t, uint8_t, float, false, false>();
 }
 
-
 TEST(MatMulIntegerToFloat, HasZeroPoint_HasBias_test_U8X8) {
   RunMatMulIntegerToFloatTest<uint8_t, int8_t, float, true, true>();
 }
@@ -247,7 +242,7 @@ TEST(MatMulIntegerToFloat, MatMulIntegerToFloat_FP16_U8U8) {
   int64_t K = 2;
 
   std::vector<uint8_t> A_data = {1, 5, 2, 1, 9,
-                                1, 1, 3, 7, 2};
+                                 1, 1, 3, 7, 2};
   std::vector<uint8_t> B_data = {3, 7, 2, 1, 1,
                                  2, 1, 9, 1, 1};
   std::vector<MLFloat16> A_scale = ToFloat16({3.0f});
@@ -257,7 +252,6 @@ TEST(MatMulIntegerToFloat, MatMulIntegerToFloat_FP16_U8U8) {
   std::vector<uint8_t> A_zero_point = {3};
   std::vector<uint8_t> B_zero_point = {5};
 
-
   test.AddInput<MLFloat16>("a_scale", {1}, A_scale);
   test.AddInput<MLFloat16>("b_scale", {1}, B_scale);
   test.AddInput<uint8_t>("a_zero_point", {1}, A_zero_point);
@@ -281,7 +275,7 @@ TEST(MatMulIntegerToFloat, MatMulIntegerToFloat_FP16_U8S8) {
   std::vector<uint8_t> A_data = {3, 7, 2, 1, 1,
                                  2, 1, 9, 1, 1};
   std::vector<int8_t> B_data = {2, -1, -9, 1, 1,
-                               -1, 0, -3, 1, -4};
+                                -1, 0, -3, 1, -4};
   std::vector<MLFloat16> A_scale = ToFloat16({-4.0f});
   std::vector<MLFloat16> B_scale = ToFloat16({2.0f});
   test.AddInput<uint8_t>("A", {M, K}, A_data);
@@ -312,7 +306,7 @@ TEST(MatMulIntegerToFloat, MatMulIntegerToFloat_FP16_S8S8) {
   int64_t K = 2;
 
   std::vector<int8_t> A_data = {3, 7, -2, 1, 1,
-                               2, -1, -9, 1, 1};
+                                2, -1, -9, 1, 1};
   std::vector<int8_t> B_data = {2, -1, -9, 1, 1,
                                 -1, 0, -3, 1, -4};
   std::vector<MLFloat16> A_scale = ToFloat16({-4.0f});
@@ -329,7 +323,6 @@ TEST(MatMulIntegerToFloat, MatMulIntegerToFloat_FP16_S8S8) {
   test.AddInput<int8_t>("b_zero_point", {1}, B_zero_point);
   test.AddInput<MLFloat16>("bias", {N}, Bias);
 
-
   std::vector<float> Y_data(M * N);
   CalculateMatMulIntegerToFloat(M, N, K, A_data, A_scale, A_zero_point, B_data,
                                 B_scale, B_zero_point, Bias, Y_data, false, true, true);
@@ -381,10 +374,10 @@ TEST(MatMulIntegerToFloat, MatMulIntegerToFloat_FP16) {
   int64_t K = 3;
 
   std::vector<int8_t> A_data = {11, -2, 5,
-                               -1, 3, 10};
+                                -1, 3, 10};
   std::vector<int8_t> B_data = {-13, -2,
-                               9, 55,
-                               -1, 23};
+                                9, 55,
+                                -1, 23};
   std::vector<MLFloat16> A_scale = ToFloat16({0.910f});
   std::vector<MLFloat16> B_scale = ToFloat16({1.10f, 1.123f});
diff --git a/onnxruntime/test/testdata/matmul_integer_to_float.py b/onnxruntime/test/testdata/matmul_integer_to_float.py
index 36902598aad14..0c9ee3f3e6492 100644
--- a/onnxruntime/test/testdata/matmul_integer_to_float.py
+++ b/onnxruntime/test/testdata/matmul_integer_to_float.py
@@ -78,7 +78,7 @@ def GenerateModel(model_name, sign_i, sign_w, output_type_fp16, has_zp=True, bias=False):
 
 if __name__ == "__main__":
-    #GenerateModel("matmul_integer_to_float16_int8.onnx", sign_i=False, sign_w=True, output_type_fp16=True)
+    # GenerateModel("matmul_integer_to_float16_int8.onnx", sign_i=False, sign_w=True, output_type_fp16=True)
    GenerateModel("matmul_integer_to_float_int8.onnx", sign_i=False, sign_w=True, output_type_fp16=False)
    GenerateModel("matmul_integer_to_float_uint8.onnx", sign_i=False, sign_w=False, output_type_fp16=False)
    GenerateModel(
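--
Note (appended for review, not part of the patch): the helper this patch reflows,
CalculateMatMulIntegerToFloat, is the test's reference implementation of
dequantize-then-matmul. As a rough standalone sketch of that math -- assuming
uint8 inputs, a per-tensor A scale/zero point, and per-column B scale/zero point;
the function name MatMulIntegerToFloatRef and the fixed types are illustrative,
not the ORT helper itself:

    #include <cstdint>
    #include <vector>

    // Reference: Y[m,n] = sum_k dequant(A[m,k]) * dequant(B[k,n]) + bias[n],
    // where dequant(x) = (x - zero_point) * scale.
    std::vector<float> MatMulIntegerToFloatRef(int64_t M, int64_t N, int64_t K,
                                               const std::vector<uint8_t>& A, float a_scale, uint8_t a_zero_point,
                                               const std::vector<uint8_t>& B, const std::vector<float>& b_scale,
                                               const std::vector<uint8_t>& b_zero_point,
                                               const std::vector<float>& bias) {
      std::vector<float> Y(static_cast<size_t>(M * N), 0.0f);
      for (int64_t m = 0; m < M; ++m) {
        for (int64_t n = 0; n < N; ++n) {
          float sum = 0.0f;
          for (int64_t k = 0; k < K; ++k) {
            // Dequantize each operand before multiplying; b_scale and
            // b_zero_point are per-column (size N), matching the per_column
            // path exercised by the test.
            float a = (static_cast<int32_t>(A[m * K + k]) - a_zero_point) * a_scale;
            float b = (static_cast<int32_t>(B[k * N + n]) - b_zero_point[n]) * b_scale[n];
            sum += a * b;
          }
          Y[m * N + n] = sum + (bias.empty() ? 0.0f : bias[n]);
        }
      }
      return Y;
    }

The FP16 variants in the test compute the same float reference and then convert
with ToFloat16, which is why they set a loose absolute tolerance (0.5f) on "Y".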