[QNN EP] Fix Pad UT (#17982)
### Description

The QNN EP has two failing unit tests:

- `TEST_F(QnnHTPBackendTests, DISABLED_PadReflectMode)`
- `TEST_F(QnnHTPBackendTests, DISABLED_Pad4dOutOfRangePadConstantValue)`

For the first unit test, QNN's master op definition states that when using
MIRROR_REFLECT, the before and after pad amounts must not be greater than
shape(in[0])[i] - 1. The pad amounts are therefore changed from {0, 2, 0, 0}
to {0, 1, 0, 0}.
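
For illustration, a minimal sketch of the constraint (the helper name is made
up, not the EP code): a reflect pad of 2 on an axis of size 2 has nothing left
to mirror, while a pad of 1 does.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Checks QNN's MIRROR_REFLECT rule: each before/after pad amount must be
// <= shape(in[0])[i] - 1. Illustrative helper only; not the actual EP code.
bool ReflectPadsInRange(const std::vector<uint32_t>& input_shape,
                        const std::vector<uint32_t>& pad_begin,
                        const std::vector<uint32_t>& pad_end) {
  for (size_t i = 0; i < input_shape.size(); ++i) {
    if (pad_begin[i] > input_shape[i] - 1 || pad_end[i] > input_shape[i] - 1) {
      return false;
    }
  }
  return true;
}

// For the test input of shape {3, 2}:
//   ONNX pads {0, 2, 0, 0} -> begin {0, 2}, end {0, 0}; 2 > 2 - 1, rejected.
//   ONNX pads {0, 1, 0, 0} -> begin {0, 1}, end {0, 0}; within range, accepted.
```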

For the second unit test, QNN has no limitation requiring the pad constant to
be smaller than the values in input[0]. The test was failing because it did
not take the pad constant value into account when computing the quantization
parameters.
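
A rough sketch of the fix idea (assuming uint8 asymmetric quantization; the
helper name is hypothetical and the real test utilities differ): the
quantization range used by the QDQ test must cover the pad constant value as
well as the input data, otherwise the quantized constant saturates and the HTP
output no longer matches the CPU reference.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Derive uint8 scale/zero-point from the input data range extended by the pad
// constant value. Hypothetical helper for illustration only.
void GetQuantParamsWithPadConstant(const std::vector<float>& data, float pad_constant,
                                   float& scale, uint8_t& zero_point) {
  float min_val = *std::min_element(data.begin(), data.end());
  float max_val = *std::max_element(data.begin(), data.end());
  // Include the pad constant (and 0) so the padded output stays representable.
  min_val = std::min({min_val, pad_constant, 0.0f});
  max_val = std::max({max_val, pad_constant, 0.0f});
  scale = (max_val > min_val) ? (max_val - min_val) / 255.0f : 1.0f;
  zero_point = static_cast<uint8_t>(std::round(-min_val / scale));
}
```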

### Motivation and Context
Fix the two unit tests mentioned in the description.
winskuo-quic authored Nov 3, 2023
1 parent c352e9b commit 90f205e
Showing 2 changed files with 44 additions and 13 deletions.
27 changes: 17 additions & 10 deletions onnxruntime/core/providers/qnn/builder/opbuilder/pad_op_builder.cc
@@ -202,16 +202,8 @@ Status PadOpBuilder::ProcessAttributesAndOutputs(QnnModelWrapper& qnn_model_wrap
// Qnn format is begin_0, end_0, begin_1, end_1, ...
ReArranagePads(pad_amount);

std::vector<uint32_t> pad_amount_dim{static_cast<uint32_t>(pad_amount.size() / 2), static_cast<uint32_t>(2)};
QnnParamWrapper multiples_param(node_unit.Index(), node_unit.Name(), QNN_OP_PAD_PARAM_PAD_AMOUNT, std::move(pad_amount_dim),
std::move(pad_amount));
param_tensor_names.push_back(multiples_param.GetParamTensorName());
qnn_model_wrapper.AddParamWrapper(std::move(multiples_param));

// Process optional input constant_value
if (node_unit.Inputs().size() > 2) {
ORT_RETURN_IF_ERROR(ProcessConstantValue(qnn_model_wrapper, param_tensor_names, node_unit, inputs[2]));
} // constant_value
std::vector<uint32_t> input_shape;
ORT_RETURN_IF_NOT(qnn_model_wrapper.GetOnnxShape(inputs[0].node_arg, input_shape), "Cannot get shape of input 0.");

NodeAttrHelper node_helper(node_unit);
std::string mode = node_helper.Get("mode", "constant");
@@ -220,17 +212,32 @@ Status PadOpBuilder::ProcessAttributesAndOutputs(QnnModelWrapper& qnn_model_wrap
if ("constant" == mode) {
mode_qnn_scalar.uint32Value = QNN_OP_PAD_SCHEME_CONSTANT;
} else if ("reflect" == mode) {
for (size_t i = 0; i < input_shape.size(); i++) {
ORT_RETURN_IF(pad_amount[i * 2] > input_shape[i] - 1 || pad_amount[(i * 2) + 1] > input_shape[i] - 1,
"Pad amount should not be greater than shape(input[0])[i] - 1");
}
mode_qnn_scalar.uint32Value = QNN_OP_PAD_SCHEME_MIRROR_REFLECT;
} else if ("edge" == mode) {
mode_qnn_scalar.uint32Value = QNN_OP_PAD_SCHEME_EDGE;
} else {
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Pad mode only support constant.");
}

std::vector<uint32_t> pad_amount_dim{static_cast<uint32_t>(pad_amount.size() / 2), static_cast<uint32_t>(2)};
QnnParamWrapper mode_param(node_unit.Index(), node_unit.Name(), QNN_OP_PAD_PARAM_SCHEME, mode_qnn_scalar);
param_tensor_names.push_back(mode_param.GetParamTensorName());
qnn_model_wrapper.AddParamWrapper(std::move(mode_param));

QnnParamWrapper multiples_param(node_unit.Index(), node_unit.Name(), QNN_OP_PAD_PARAM_PAD_AMOUNT,
std::move(pad_amount_dim), std::move(pad_amount));
param_tensor_names.push_back(multiples_param.GetParamTensorName());
qnn_model_wrapper.AddParamWrapper(std::move(multiples_param));

// Process optional input constant_value
if (node_unit.Inputs().size() > 2) {
ORT_RETURN_IF_ERROR(ProcessConstantValue(qnn_model_wrapper, param_tensor_names, node_unit, inputs[2]));
} // constant_value

ORT_RETURN_IF_ERROR(ProcessOutputs(qnn_model_wrapper, node_unit,
std::move(input_names),
std::move(param_tensor_names),
30 changes: 27 additions & 3 deletions onnxruntime/test/providers/qnn/pad_op_test.cpp
@@ -167,7 +167,7 @@ TEST_F(QnnCPUBackendTests, Pad2dPadsNotIni) {
TEST_F(QnnCPUBackendTests, DISABLED_PadModeReflect) {
bool has_constant_value = false;
RunPadOpTest(TestInputDef<float>({3, 2}, false, {1.0f, 1.2f, 2.3f, 3.4f, 4.5f, 5.6f}),
TestInputDef<int64_t>({4}, true, {0, 2, 0, 0}),
TestInputDef<int64_t>({4}, true, {0, 1, 0, 0}),
TestInputDef<float>({1}, true, {0.0f}),
{utils::MakeAttribute("mode", "reflect")},
ExpectedEPNodeAssignment::All,
@@ -266,13 +266,37 @@ TEST_F(QnnHTPBackendTests, PadHasConstantValueQuantized) {
constant_value_quantized);
}

// QNN graph execute error. Error code: 6031
TEST_F(QnnHTPBackendTests, DISABLED_PadReflectMode) {
TEST_F(QnnHTPBackendTests, PadReflectMode) {
bool has_constant_value_input = false;
RunQDQPadOpTest<uint8_t>(TestInputDef<float>({3, 2}, false, {1.0f, 1.2f, 2.3f, 3.4f, 4.5f, 5.6f}),
TestInputDef<int64_t>({4}, true, {0, 1, 0, 0}),
TestInputDef<float>({1}, true, {0.0f}),
{utils::MakeAttribute("mode", "reflect")},
ExpectedEPNodeAssignment::All,
has_constant_value_input);
}

// Pad amount should not be greater than shape(input[0])[i] - 1
TEST_F(QnnHTPBackendTests, PadReflectModeOutOfRangePadAmount) {
bool has_constant_value_input = false;
RunQDQPadOpTest<uint8_t>(TestInputDef<float>({3, 2}, false, {1.0f, 1.2f, 2.3f, 3.4f, 4.5f, 5.6f}),
TestInputDef<int64_t>({4}, true, {0, 2, 0, 0}),
TestInputDef<float>({1}, true, {0.0f}),
{utils::MakeAttribute("mode", "reflect")},
ExpectedEPNodeAssignment::None,
has_constant_value_input);
}

TEST_F(QnnHTPBackendTests, Pad4dReflectMode) {
bool has_constant_value_input = false;
RunQDQPadOpTest<uint8_t>(TestInputDef<float>({1, 2, 2, 2}, false,
{1.0f, 2.0f,
3.0f, 4.0f,
5.0f, 6.0f,
7.0f, 8.0f}),
TestInputDef<int64_t>({8}, true, {0, 1, 1, 1, 0, 1, 1, 1}),
TestInputDef<float>({1}, true, {0.0f}),
{utils::MakeAttribute("mode", "reflect")},
ExpectedEPNodeAssignment::All,
has_constant_value_input);
}
