From 18a7f34ba052d183a254dcdcc9a939790e8c73e0 Mon Sep 17 00:00:00 2001 From: Adrian Lizarraga Date: Tue, 19 Mar 2024 13:48:04 -0700 Subject: [PATCH] [NhwcTransformerTests] Fix linker error due to explicit template instantiation of ModelBuilder methods (#19980) Currently, the nhwc_transformer_test.cc compilation unit defines explicit FP16 versions of `ModelTestBuilder::MakeInput` and `ModelTestBuilder::MakeInitializer` outside of the ModelTestBuilder class's header file. These explicit template instantiations cause linker errors when other compilation units also instantiate these functions due to duplicate definitions. Additionally, the versions defined in nhwc_transformer_test.cc do not really conform to the expected behavior in the original ModelTestBuilder class, which is to make random input/initializer values. Instead, the versions in nhwc_transformer_test.cc create a range of values. The solution is to edit nhwc_transformer_test.cc to use stand-alone static functions that do not change the ModelTestBuilder class. **Note**: This linker error cannot currently be replicated in our CIs because it requires a QNN-HTP-enabled Windows ARM64 environment with `MLAS_F16VEC_INTRINSICS_SUPPORTED` defined. I can replicate on a local build. 
The linker error/conflict happens with this new FP16 QNN test: https://github.com/microsoft/onnxruntime/blob/d4c8bc359e321cdabdd87b70b392dd0e7a14502e/onnxruntime/test/providers/qnn/clip_op_test.cc#L186 --- .../test/optimizer/nhwc_transformer_test.cc | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/onnxruntime/test/optimizer/nhwc_transformer_test.cc b/onnxruntime/test/optimizer/nhwc_transformer_test.cc index c254d340cdcb8..e6f0a259805e5 100644 --- a/onnxruntime/test/optimizer/nhwc_transformer_test.cc +++ b/onnxruntime/test/optimizer/nhwc_transformer_test.cc @@ -518,7 +518,7 @@ TEST(NhwcTransformerTests, ConvMixTensorRanks) { #ifdef MLAS_F16VEC_INTRINSICS_SUPPORTED -std::vector<MLFloat16> randomfp16(const std::vector<int64_t>& shape, MLFloat16 min, MLFloat16 max) { +static std::vector<MLFloat16> ARangeOfFP16Values(const std::vector<int64_t>& shape, MLFloat16 min, MLFloat16 max) { std::vector<MLFloat16> val(detail::SizeFromDims(shape)); float start = min.ToFloat(); float end = max.ToFloat(); @@ -534,22 +534,22 @@ std::vector<MLFloat16> randomfp16(const std::vector<int64_t>& shape, MLFloat16 m return val; } -template <> -NodeArg* ModelTestBuilder::MakeInput<MLFloat16>(const std::vector<int64_t>& shape, MLFloat16 min, MLFloat16 max) { - return MakeInput<MLFloat16>(shape, randomfp16(shape, min, max)); +static NodeArg* MakeInputARangeFP16(ModelTestBuilder& builder, const std::vector<int64_t>& shape, + MLFloat16 min, MLFloat16 max) { + return builder.MakeInput<MLFloat16>(shape, ARangeOfFP16Values(shape, min, max)); } -template <> -NodeArg* ModelTestBuilder::MakeInitializer<MLFloat16>(const std::vector<int64_t>& shape, MLFloat16 min, MLFloat16 max) { - return MakeInitializer<MLFloat16>(shape, randomfp16(shape, min, max)); +static NodeArg* MakeInitializerARangeFP16(ModelTestBuilder& builder, const std::vector<int64_t>& shape, + MLFloat16 min, MLFloat16 max) { + return builder.MakeInitializer<MLFloat16>(shape, ARangeOfFP16Values(shape, min, max)); } TEST(NhwcTransformerTests, ConvFp16) { auto test_case = [&](const std::vector<int64_t>& input_shape, const std::vector<int64_t>& weights_shape) { auto build_test_case = 
[&](ModelTestBuilder& builder) { - auto* input_arg = builder.MakeInput<MLFloat16>(input_shape, MLFloat16(-1.5f), MLFloat16(1.5f)); + auto* input_arg = MakeInputARangeFP16(builder, input_shape, MLFloat16(-1.5f), MLFloat16(1.5f)); auto* output_arg = builder.MakeOutput(); - auto* weight_arg = builder.MakeInitializer<MLFloat16>(weights_shape, MLFloat16(-1.5f), MLFloat16(1.5f)); + auto* weight_arg = MakeInitializerARangeFP16(builder, weights_shape, MLFloat16(-1.5f), MLFloat16(1.5f)); builder.AddConvNode(input_arg, weight_arg, output_arg); }; @@ -575,10 +575,10 @@ TEST(NhwcTransformerTests, ConvFp16) { TEST(NhwcTransformerTests, ConvMaxPoolFp16) { auto test_case = [&](const std::vector<int64_t>& input_shape, const std::vector<int64_t>& weights_shape) { auto build_test_case = [&](ModelTestBuilder& builder) { - auto* input_arg = builder.MakeInput<MLFloat16>(input_shape, MLFloat16(-1.5f), MLFloat16(1.5f)); + auto* input_arg = MakeInputARangeFP16(builder, input_shape, MLFloat16(-1.5f), MLFloat16(1.5f)); auto* conv_output_arg = builder.MakeIntermediate(); auto* output_arg = builder.MakeOutput(); - auto* conv_weight_arg = builder.MakeInitializer<MLFloat16>(weights_shape, MLFloat16(-1.5f), MLFloat16(1.5f)); + auto* conv_weight_arg = MakeInitializerARangeFP16(builder, weights_shape, MLFloat16(-1.5f), MLFloat16(1.5f)); builder.AddConvNode(input_arg, conv_weight_arg, conv_output_arg); Node& pool_node = builder.AddNode("MaxPool", {conv_output_arg}, {output_arg}); @@ -609,13 +609,13 @@ TEST(NhwcTransformerTests, ConvMaxPoolFp16) { TEST(NhwcTransformerTests, ConvGlobalAveragePoolFp16) { auto build_test_case = [&](ModelTestBuilder& builder) { - auto* input_arg = builder.MakeInput<MLFloat16>({1, 23, 13, 13}, MLFloat16(-1.5f), MLFloat16(1.5f)); + auto* input_arg = MakeInputARangeFP16(builder, {1, 23, 13, 13}, MLFloat16(-1.5f), MLFloat16(1.5f)); auto* conv1_output_arg = builder.MakeIntermediate(); auto* conv2_output_arg = builder.MakeIntermediate(); auto* gavgpool1_output_arg = builder.MakeIntermediate(); auto* output_arg = builder.MakeOutput(); - auto* 
conv1_weight_arg = builder.MakeInitializer<MLFloat16>({30, 23, 3, 3}, MLFloat16(-1.5f), MLFloat16(1.5f)); - auto* conv2_weight_arg = builder.MakeInitializer<MLFloat16>({16, 30, 1, 1}, MLFloat16(-1.5f), MLFloat16(1.5f)); + auto* conv1_weight_arg = MakeInitializerARangeFP16(builder, {30, 23, 3, 3}, MLFloat16(-1.5f), MLFloat16(1.5f)); + auto* conv2_weight_arg = MakeInitializerARangeFP16(builder, {16, 30, 1, 1}, MLFloat16(-1.5f), MLFloat16(1.5f)); Node& conv1_node = builder.AddConvNode(input_arg, conv1_weight_arg, conv1_output_arg); conv1_node.AddAttribute("pads", std::vector<int64_t>{1, 1, 1, 1}); @@ -640,13 +640,13 @@ TEST(NhwcTransformerTests, ConvGlobalAveragePoolFp16) { TEST(NhwcTransformerTests, ConvAveragePoolFp16) { auto build_test_case = [&](ModelTestBuilder& builder) { - auto* input_arg = builder.MakeInput<MLFloat16>({1, 23, 13, 13}, MLFloat16(-1.5f), MLFloat16(1.5f)); + auto* input_arg = MakeInputARangeFP16(builder, {1, 23, 13, 13}, MLFloat16(-1.5f), MLFloat16(1.5f)); auto* conv1_output_arg = builder.MakeIntermediate(); auto* conv2_output_arg = builder.MakeIntermediate(); auto* avgpool1_output_arg = builder.MakeIntermediate(); auto* output_arg = builder.MakeOutput(); - auto* conv1_weight_arg = builder.MakeInitializer<MLFloat16>({30, 23, 3, 3}, MLFloat16(-1.5f), MLFloat16(1.5f)); - auto* conv2_weight_arg = builder.MakeInitializer<MLFloat16>({16, 30, 3, 3}, MLFloat16(-1.5f), MLFloat16(1.5f)); + auto* conv1_weight_arg = MakeInitializerARangeFP16(builder, {30, 23, 3, 3}, MLFloat16(-1.5f), MLFloat16(1.5f)); + auto* conv2_weight_arg = MakeInitializerARangeFP16(builder, {16, 30, 3, 3}, MLFloat16(-1.5f), MLFloat16(1.5f)); Node& conv1_node = builder.AddConvNode(input_arg, conv1_weight_arg, conv1_output_arg); conv1_node.AddAttribute("pads", std::vector<int64_t>{1, 1, 1, 1});