Skip to content

Commit

Permalink
[WebNN EP] Use GetVecUint32FromVecInt64 to simplify the code (#19324)
Browse files Browse the repository at this point in the history
- Use the helper function `GetVecUint32FromVecInt64` (declared in helper.h) to replace
manual `std::transform` conversions from `std::vector<int64_t>` to `std::vector<uint32_t>`.
- Change some `int32_t` vectors to `uint32_t` to match WebNN's unsigned dimension/padding types.
- Remove an unused temporary vector `temp` whose computed values were never read.
  • Loading branch information
zesongw authored Jan 31, 2024
1 parent 3262e8d commit d87f73a
Show file tree
Hide file tree
Showing 6 changed files with 7 additions and 23 deletions (net −16 lines).
Original file line number Diff line number Diff line change
Expand Up @@ -125,10 +125,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
output = model_builder.GetBuilder().call<emscripten::val>("instanceNormalization", input, options);
// Reshape back to the original output shape for 3D input.
if (input_shape.size() != 4) {
std::vector<uint32_t> output_shape;
std::transform(input_shape.begin(), input_shape.end(),
std::back_inserter(output_shape),
[](int64_t dim) -> uint32_t { return SafeInt<uint32_t>(dim); });
std::vector<uint32_t> output_shape = GetVecUint32FromVecInt64(input_shape);
output = model_builder.GetBuilder().call<emscripten::val>(
"reshape", output, emscripten::val::array(output_shape));
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ Status PoolOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
const auto onnx_kernel_shape = helper.Get("kernel_shape", std::vector<int64_t>{0, 0});
const auto onnx_strides = helper.Get("strides", std::vector<int64_t>{1, 1});
const auto onnx_pads = helper.Get("pads", std::vector<int64_t>{0, 0, 0, 0});
auto pads = helper.Get("pads", std::vector<int32_t>{0, 0, 0, 0});
auto pads = helper.Get("pads", std::vector<uint32_t>{0, 0, 0, 0});
std::vector<int64_t> input_shape;
ORT_RETURN_IF_NOT(GetShape(*input_defs[0], input_shape, logger), "Cannot get shape");
AutoPadType auto_pad_type = StringToAutoPadType(helper.Get("auto_pad", "NOTSET"));
Expand All @@ -94,12 +94,11 @@ Status PoolOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
auto_pad_type,
pads_out,
model_builder.GetPreferredLayout() == DataLayout::NCHW));
std::transform(pads_out.begin(), pads_out.end(), pads.begin(),
[](int64_t pad) -> int32_t { return static_cast<int32_t>(pad); });
pads = GetVecUint32FromVecInt64(pads_out);
}
// Permute the ONNX's pads, which is [beginning_height, beginning_width, ending_height, ending_width],
// while WebNN's padding is [beginning_height, ending_height, beginning_width, ending_width].
const std::vector<int32_t> padding{pads[0], pads[2], pads[1], pads[3]};
const std::vector<uint32_t> padding{pads[0], pads[2], pads[1], pads[3]};
options.set("padding", emscripten::val::array(padding));

const auto ceil_mode = helper.Get("ceil_mode", 0);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -83,10 +83,7 @@ Status SplitOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
std::vector<int64_t> mapping_split;
mapping_split.insert(mapping_split.begin(), num_outputs - 1, input_shape[axis] / num_outputs);
mapping_split.insert(mapping_split.end(), input_shape[axis] % num_outputs);
std::vector<int32_t> converted_splits;
std::transform(mapping_split.cbegin(), mapping_split.cend(),
std::back_inserter(converted_splits),
[](int64_t dim) -> int32_t { return SafeInt<int32_t>(dim); });
std::vector<uint32_t> converted_splits = GetVecUint32FromVecInt64(mapping_split);
output_array = model_builder.GetBuilder().call<emscripten::val>("split",
input,
emscripten::val::array(converted_splits),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -87,10 +87,7 @@ Status SqueezeUnsqueezeOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_buil

emscripten::val output = emscripten::val::undefined();
// Use WebNN's reshape to implement Squeeze/Unsqueeze.
std::vector<uint32_t> new_shape;
std::transform(
input_shape.begin(), input_shape.end(), std::back_inserter(new_shape),
[](int64_t data) -> uint32_t { return SafeInt<uint32_t>(data); });
std::vector<uint32_t> new_shape = GetVecUint32FromVecInt64(input_shape);
// Sort axes_data in ascending order.
std::sort(axes_data.begin(), axes_data.end());
if (op_type == "Squeeze") {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,10 +40,7 @@ Status TransposeOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,

emscripten::val input = model_builder.GetOperand(input_defs[0]->Name());
emscripten::val options = emscripten::val::object();
std::vector<int32_t> permutation;
std::transform(perm.cbegin(), perm.cend(),
std::back_inserter(permutation),
[](int64_t dim) -> int32_t { return SafeInt<int32_t>(dim); });
std::vector<uint32_t> permutation = GetVecUint32FromVecInt64(perm);
options.set("permutation", emscripten::val::array(permutation));
emscripten::val output = model_builder.GetBuilder().call<emscripten::val>("transpose", input, options);
model_builder.AddOperand(node.OutputDefs()[0]->Name(), std::move(output));
Expand Down
3 changes: 0 additions & 3 deletions onnxruntime/core/providers/webnn/webnn_execution_provider.cc
Original file line number Diff line number Diff line change
Expand Up @@ -282,9 +282,6 @@ common::Status WebNNExecutionProvider::Compile(const std::vector<FusedNodeAndGra
// Since all the input output of WebNN EP is MultiArray, we will make the scalar input as a {1} MultiArray.
if (shape.empty())
shape.push_back(1);
std::vector<int> temp(shape.size());
transform(shape.begin(), shape.end(), temp.begin(),
[](int64_t dim) -> uint32_t { return SafeInt<int32_t>(dim); });
const void* inputBuffer = const_cast<void*>(input_tensor.GetTensorRawData());
inputs.emplace(
input_name,
Expand Down

0 comments on commit d87f73a

Please sign in to comment.