diff --git a/onnxruntime/core/providers/webnn/builders/impl/normalization_op_builder.cc b/onnxruntime/core/providers/webnn/builders/impl/normalization_op_builder.cc
index 4d2470dfe7deb..50e04df4fe0f2 100644
--- a/onnxruntime/core/providers/webnn/builders/impl/normalization_op_builder.cc
+++ b/onnxruntime/core/providers/webnn/builders/impl/normalization_op_builder.cc
@@ -125,10 +125,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
     output = model_builder.GetBuilder().call<emscripten::val>("instanceNormalization", input, options);
     // Reshape back to the original output shape for 3D input.
     if (input_shape.size() != 4) {
-      std::vector<uint32_t> output_shape;
-      std::transform(input_shape.begin(), input_shape.end(),
-                     std::back_inserter(output_shape),
-                     [](int64_t dim) -> uint32_t { return SafeInt<uint32_t>(dim); });
+      std::vector<uint32_t> output_shape = GetVecUint32FromVecInt64(input_shape);
       output = model_builder.GetBuilder().call<emscripten::val>(
           "reshape", output, emscripten::val::array(output_shape));
     }
diff --git a/onnxruntime/core/providers/webnn/builders/impl/pool_op_builder.cc b/onnxruntime/core/providers/webnn/builders/impl/pool_op_builder.cc
index 739c3b3f38def..8b3eecf35fcc8 100644
--- a/onnxruntime/core/providers/webnn/builders/impl/pool_op_builder.cc
+++ b/onnxruntime/core/providers/webnn/builders/impl/pool_op_builder.cc
@@ -81,7 +81,7 @@ Status PoolOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
   const auto onnx_kernel_shape = helper.Get("kernel_shape", std::vector<int32_t>{0, 0});
   const auto onnx_strides = helper.Get("strides", std::vector<int32_t>{1, 1});
   const auto onnx_pads = helper.Get("pads", std::vector<int32_t>{0, 0, 0, 0});
-  auto pads = helper.Get("pads", std::vector<int32_t>{0, 0, 0, 0});
+  auto pads = helper.Get("pads", std::vector<uint32_t>{0, 0, 0, 0});
   std::vector<int64_t> input_shape;
   ORT_RETURN_IF_NOT(GetShape(*input_defs[0], input_shape, logger), "Cannot get shape");
   AutoPadType auto_pad_type = StringToAutoPadType(helper.Get("auto_pad", "NOTSET"));
@@ -94,12 +94,11 @@ Status PoolOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
                                       auto_pad_type,
                                       pads_out,
                                       model_builder.GetPreferredLayout() == DataLayout::NCHW));
-    std::transform(pads_out.begin(), pads_out.end(), pads.begin(),
-                   [](int64_t pad) -> int32_t { return static_cast<int32_t>(pad); });
+    pads = GetVecUint32FromVecInt64(pads_out);
   }
   // Permute the ONNX's pads, which is [beginning_height, beginning_width, ending_height, ending_width],
   // while WebNN's padding is [beginning_height, ending_height, beginning_width, ending_width].
-  const std::vector<int32_t> padding{pads[0], pads[2], pads[1], pads[3]};
+  const std::vector<uint32_t> padding{pads[0], pads[2], pads[1], pads[3]};
   options.set("padding", emscripten::val::array(padding));

   const auto ceil_mode = helper.Get("ceil_mode", 0);
diff --git a/onnxruntime/core/providers/webnn/builders/impl/split_op_builder.cc b/onnxruntime/core/providers/webnn/builders/impl/split_op_builder.cc
index e9a600a5933af..91f21b196be54 100644
--- a/onnxruntime/core/providers/webnn/builders/impl/split_op_builder.cc
+++ b/onnxruntime/core/providers/webnn/builders/impl/split_op_builder.cc
@@ -83,10 +83,7 @@ Status SplitOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
       std::vector<int64_t> mapping_split;
       mapping_split.insert(mapping_split.begin(), num_outputs - 1, input_shape[axis] / num_outputs);
       mapping_split.insert(mapping_split.end(), input_shape[axis] % num_outputs);
-      std::vector<int32_t> converted_splits;
-      std::transform(mapping_split.cbegin(), mapping_split.cend(),
-                     std::back_inserter(converted_splits),
-                     [](int64_t dim) -> int32_t { return SafeInt<int32_t>(dim); });
+      std::vector<uint32_t> converted_splits = GetVecUint32FromVecInt64(mapping_split);
       output_array = model_builder.GetBuilder().call<emscripten::val>("split",
                                                                      input,
                                                                      emscripten::val::array(converted_splits),
diff --git a/onnxruntime/core/providers/webnn/builders/impl/squeeze_unsqueeze_op_builder.cc b/onnxruntime/core/providers/webnn/builders/impl/squeeze_unsqueeze_op_builder.cc
index 9f6ccb98f79dd..15149bd8fe821 100644
--- a/onnxruntime/core/providers/webnn/builders/impl/squeeze_unsqueeze_op_builder.cc
+++ b/onnxruntime/core/providers/webnn/builders/impl/squeeze_unsqueeze_op_builder.cc
@@ -87,10 +87,7 @@ Status SqueezeUnsqueezeOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_buil
   emscripten::val output = emscripten::val::undefined();

   // Use WebNN's reshape to implement Squeeze/Unsqueeze.
-  std::vector<uint32_t> new_shape;
-  std::transform(
-      input_shape.begin(), input_shape.end(), std::back_inserter(new_shape),
-      [](int64_t data) -> uint32_t { return SafeInt<uint32_t>(data); });
+  std::vector<uint32_t> new_shape = GetVecUint32FromVecInt64(input_shape);
   // Sort axes_data in ascending order.
   std::sort(axes_data.begin(), axes_data.end());
   if (op_type == "Squeeze") {
diff --git a/onnxruntime/core/providers/webnn/builders/impl/transpose_op_builder.cc b/onnxruntime/core/providers/webnn/builders/impl/transpose_op_builder.cc
index eca1521384643..79f60c51ace1b 100644
--- a/onnxruntime/core/providers/webnn/builders/impl/transpose_op_builder.cc
+++ b/onnxruntime/core/providers/webnn/builders/impl/transpose_op_builder.cc
@@ -40,10 +40,7 @@ Status TransposeOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,

   emscripten::val input = model_builder.GetOperand(input_defs[0]->Name());
   emscripten::val options = emscripten::val::object();
-  std::vector<int32_t> permutation;
-  std::transform(perm.cbegin(), perm.cend(),
-                 std::back_inserter(permutation),
-                 [](int64_t dim) -> int32_t { return SafeInt<int32_t>(dim); });
+  std::vector<uint32_t> permutation = GetVecUint32FromVecInt64(perm);
   options.set("permutation", emscripten::val::array(permutation));
   emscripten::val output = model_builder.GetBuilder().call<emscripten::val>("transpose", input, options);
   model_builder.AddOperand(node.OutputDefs()[0]->Name(), std::move(output));
diff --git a/onnxruntime/core/providers/webnn/webnn_execution_provider.cc b/onnxruntime/core/providers/webnn/webnn_execution_provider.cc
index cfb96af557d35..29c8ca91fe72c 100644
--- a/onnxruntime/core/providers/webnn/webnn_execution_provider.cc
+++ b/onnxruntime/core/providers/webnn/webnn_execution_provider.cc
@@ -282,9 +282,6 @@ common::Status WebNNExecutionProvider::Compile(const std::vector<FusedNodeAndGraph>& fused_nodes_and_graphs,
       auto input_tensor = ctx.GetInput(i);
       auto tensor_info = input_tensor.GetTensorTypeAndShapeInfo();
       auto shape = tensor_info.GetShape();
-      std::vector<uint32_t> temp(shape.size());
-      transform(shape.begin(), shape.end(), temp.begin(),
-                [](int64_t dim) -> uint32_t { return SafeInt<uint32_t>(dim); });
       const void* inputBuffer = const_cast<void*>(input_tensor.GetTensorRawData());
       inputs.emplace(
           input_name,
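
Note: every hunk above replaces the same hand-rolled std::transform pattern with the shared GetVecUint32FromVecInt64 helper. The helper's definition is not part of this diff; as a rough sketch (assuming it sits alongside the other WebNN builder utilities and keeps the SafeInt overflow check the old lambdas had), it would amount to something like:

    // Sketch of the presumed helper, not taken from this diff.
    // Converts an ONNX int64 dimension/axis vector into the uint32 vector WebNN
    // expects; SafeInt<uint32_t> traps on narrowing overflow instead of silently
    // wrapping the value.
    #include <algorithm>
    #include <cstdint>
    #include <vector>
    #include "core/common/safeint.h"  // assumed include path for SafeInt in ORT

    inline std::vector<uint32_t> GetVecUint32FromVecInt64(const std::vector<int64_t>& int64_vec) {
      std::vector<uint32_t> uint32_vec;
      uint32_vec.reserve(int64_vec.size());
      std::transform(int64_vec.cbegin(), int64_vec.cend(),
                     std::back_inserter(uint32_vec),
                     [](int64_t val) -> uint32_t { return SafeInt<uint32_t>(val); });
      return uint32_vec;
    }

One behavioral consequence worth noting: the split, transpose, and pool builders previously converted through int32_t (and the removed pool transform used a plain static_cast with no overflow check), so centralizing on a uint32_t SafeInt conversion both matches WebNN's unsigned operand descriptors and makes the narrowing checked at every call site. The webnn_execution_provider.cc hunk is pure cleanup: the temp vector was computed and never read, so it is simply deleted.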