Skip to content

Commit

Permalink
Addressed comments
Browse files Browse the repository at this point in the history
  • Loading branch information
Honry committed Dec 20, 2024
1 parent c8d9efc commit 49801bd
Show file tree
Hide file tree
Showing 9 changed files with 51 additions and 45 deletions.
4 changes: 4 additions & 0 deletions onnxruntime/core/providers/webnn/builders/helper.h
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,10 @@ inline bool IsEmptyTensor(const InitializedTensorSet& initializers, const std::s
return std::any_of(dims.begin(), dims.end(), [](auto d) { return d == 0; });
}

// Checks whether the input/output def at |tensor_index| is both present in
// |defs| and an existing (i.e. non-omitted) NodeArg. Optional ONNX inputs and
// outputs may be absent either by the defs list being short or by an
// explicitly empty NodeArg, so both conditions must hold.
inline bool TensorExists(const ConstPointerContainer<std::vector<NodeArg*>>& defs, size_t tensor_index) noexcept {
  if (tensor_index >= defs.size()) {
    return false;
  }
  return defs[tensor_index]->Exists();
}

bool IsTensorShapeSupported(const NodeArg& node_arg, const std::string& parent_name,
const logging::Logger& logger, bool allow_empty_input = false);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -405,8 +405,8 @@ bool ConvOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet& /* initia
int32_t input1_type; // weight data type
int32_t input2_type; // bias or x_zero_point data type
int32_t input3_type; // w_zero_point data type
bool has_input2 = input_defs.size() > 2 && input_defs[2]->Exists();
bool has_input3 = input_defs.size() > 3 && input_defs[3]->Exists();
bool has_input2 = TensorExists(input_defs, 2);
bool has_input3 = TensorExists(input_defs, 3);

if (!GetType(*input_defs[0], input0_type, logger) ||
!GetType(*input_defs[1], input1_type, logger) ||
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -742,7 +742,7 @@ bool EinsumOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet& /* init
const auto& op_type = node.OpType();
int32_t input0_type;
int32_t input1_type;
bool has_input1 = input_defs.size() > 1 && input_defs[1]->Exists();
bool has_input1 = TensorExists(input_defs, 1);

if (!GetType(*input_defs[0], input0_type, logger) ||
(has_input1 && !GetType(*input_defs[1], input1_type, logger))) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -223,8 +223,8 @@ bool GemmOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet& /* initia
int32_t input1_type; // B data type
int32_t input2_type; // C or a_zero_point data type
int32_t input3_type; // b_zero_point data type
bool has_input2 = input_defs.size() > 2 && input_defs[2]->Exists();
bool has_input3 = input_defs.size() > 3 && input_defs[3]->Exists();
bool has_input2 = TensorExists(input_defs, 2);
bool has_input3 = TensorExists(input_defs, 3);

if (!GetType(*input_defs[0], input0_type, logger) ||
!GetType(*input_defs[1], input1_type, logger) ||
Expand Down
20 changes: 10 additions & 10 deletions onnxruntime/core/providers/webnn/builders/impl/gru_op_builder.cc
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ class GruOpBuilder : public BaseOpBuilder {
};

void GruOpBuilder::AddInitializersToSkip(ModelBuilder& model_builder, const Node& node) const {
if (node.InputDefs().size() > 4 && node.InputDefs()[4]->Exists()) {
if (TensorExists(node.InputDefs(), 4)) {
model_builder.AddInitializerToSkip(node.InputDefs()[4]->Name()); // sequence_lens
model_builder.AddInputToSkip(node.InputDefs()[4]->Name());
}
Expand All @@ -56,7 +56,7 @@ Status GruOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const No
options.set("label", node.Name());
options.set("layout", emscripten::val("zrn"));

if (input_defs.size() > 3 && input_defs[3]->Exists()) {
if (TensorExists(input_defs, 3)) {
emscripten::val bias = model_builder.GetOperand(input_defs[3]->Name());
emscripten::val split_options = emscripten::val::object();
split_options.set("label", node.Name() + "_split");
Expand All @@ -68,16 +68,16 @@ Status GruOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const No
options.set("recurrentBias", splitted_biases[1]);
}

if (input_defs.size() > 5 && input_defs[5]->Exists()) {
if (TensorExists(input_defs, 5)) {
options.set("initialHiddenState", model_builder.GetOperand(input_defs[5]->Name()));
}

bool linear_before_reset = !!helper.Get("linear_before_reset ", 0);
options.set("resetAfter", linear_before_reset);

const auto& output_defs = node.OutputDefs();
bool has_Y = output_defs.size() > 0 && output_defs[0]->Exists();
bool has_Y_h = output_defs.size() > 1 && output_defs[1]->Exists();
bool has_Y = TensorExists(output_defs, 0);
bool has_Y_h = TensorExists(output_defs, 1);
options.set("returnSequence", has_Y);

std::string direction = helper.Get("direction", "forward");
Expand Down Expand Up @@ -134,7 +134,7 @@ bool GruOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& initializers, c
}
int32_t steps = static_cast<int32_t>(input_shape[0]);

if (input_defs.size() > 4 && input_defs[4]->Exists()) {
if (TensorExists(input_defs, 4)) {
if (!Contains(initializers, input_defs[4]->Name())) {
LOGS(logger, ERROR) << "GRU: sequence_lens must be constant";
return false;
Expand Down Expand Up @@ -196,8 +196,8 @@ bool GruOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet& /* initial
int32_t input_R_type = 0; // recurrent weight data type
int32_t input_B_type = 0; // bias data type
int32_t input_initial_h_type = 0; // initial hidden state data type
bool has_input_B = input_defs.size() > 3 && input_defs[3]->Exists();
bool has_input_initial_h = input_defs.size() > 5 && input_defs[5]->Exists();
bool has_input_B = TensorExists(input_defs, 3);
bool has_input_initial_h = TensorExists(input_defs, 5);

if (!GetType(*input_defs[0], input_X_type, logger) ||
!GetType(*input_defs[1], input_W_type, logger) ||
Expand Down Expand Up @@ -229,8 +229,8 @@ bool GruOpBuilder::HasSupportedOutputsImpl(const Node& node,
const auto& op_type = node.OpType();
int32_t Y_type = 0;
int32_t Y_h_type = 0;
bool has_Y = output_defs.size() > 0 && output_defs[0]->Exists();
bool has_Y_h = output_defs.size() > 1 && output_defs[1]->Exists();
bool has_Y = TensorExists(output_defs, 0);
bool has_Y_h = TensorExists(output_defs, 1);

bool Y_supported = has_Y && GetType(*output_defs[0], Y_type, logger);
bool Y_h_supported = has_Y_h && GetType(*output_defs[1], Y_h_type, logger);
Expand Down
32 changes: 16 additions & 16 deletions onnxruntime/core/providers/webnn/builders/impl/lstm_op_builder.cc
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ class LstmOpBuilder : public BaseOpBuilder {
};

void LstmOpBuilder::AddInitializersToSkip(ModelBuilder& model_builder, const Node& node) const {
if (node.InputDefs().size() > 4 && node.InputDefs()[4]->Exists()) {
if (TensorExists(node.InputDefs(), 4)) {
model_builder.AddInitializerToSkip(node.InputDefs()[4]->Name()); // sequence_lens
model_builder.AddInputToSkip(node.InputDefs()[4]->Name());
}
Expand All @@ -56,7 +56,7 @@ Status LstmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
options.set("label", node.Name());
options.set("layout", emscripten::val("iofg"));

if (input_defs.size() > 3 && input_defs[3]->Exists()) {
if (TensorExists(input_defs, 3)) {
emscripten::val bias = model_builder.GetOperand(input_defs[3]->Name());
emscripten::val split_options = emscripten::val::object();
split_options.set("axis", 1);
Expand All @@ -67,13 +67,13 @@ Status LstmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
options.set("bias", splitted_biases[0]);
options.set("recurrentBias", splitted_biases[1]);
}
if (input_defs.size() > 5 && input_defs[5]->Exists()) {
if (TensorExists(input_defs, 5)) {
options.set("initialHiddenState", model_builder.GetOperand(input_defs[5]->Name()));
}
if (input_defs.size() > 6 && input_defs[6]->Exists()) {
if (TensorExists(input_defs, 6)) {
options.set("initialCellState", model_builder.GetOperand(input_defs[6]->Name()));
}
if (input_defs.size() > 7 && input_defs[7]->Exists()) {
if (TensorExists(input_defs, 7)) {
options.set("peepholeWeight", model_builder.GetOperand(input_defs[7]->Name()));
}

Expand All @@ -87,9 +87,9 @@ Status LstmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
}

const auto& output_defs = node.OutputDefs();
bool has_Y = output_defs.size() > 0 && output_defs[0]->Exists();
bool has_Y_h = output_defs.size() > 1 && output_defs[1]->Exists();
bool has_Y_c = output_defs.size() > 2 && output_defs[2]->Exists();
bool has_Y = TensorExists(output_defs, 0);
bool has_Y_h = TensorExists(output_defs, 1);
bool has_Y_c = TensorExists(output_defs, 2);
options.set("returnSequence", has_Y);

if (helper.HasAttr("activations")) {
Expand Down Expand Up @@ -140,7 +140,7 @@ bool LstmOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& initializers,
}
int32_t steps = static_cast<int32_t>(input_shape[0]);

if (input_defs.size() > 4 && input_defs[4]->Exists()) {
if (TensorExists(input_defs, 4)) {
if (!Contains(initializers, input_defs[4]->Name())) {
LOGS(logger, ERROR) << "LSTM: sequence_lens must be constant";
return false;
Expand Down Expand Up @@ -210,10 +210,10 @@ bool LstmOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet& /* initia
int32_t input5_type = 0; // initialHiddenState data type
int32_t input6_type = 0; // initialCellState data type
int32_t input7_type = 0; // peepholeWeight data type
bool has_input3 = input_defs.size() > 3 && input_defs[3]->Exists();
bool has_input5 = input_defs.size() > 5 && input_defs[5]->Exists();
bool has_input6 = input_defs.size() > 6 && input_defs[6]->Exists();
bool has_input7 = input_defs.size() > 7 && input_defs[7]->Exists();
bool has_input3 = TensorExists(input_defs, 3);
bool has_input5 = TensorExists(input_defs, 5);
bool has_input6 = TensorExists(input_defs, 6);
bool has_input7 = TensorExists(input_defs, 7);

if (!GetType(*input_defs[0], input0_type, logger) ||
!GetType(*input_defs[1], input1_type, logger) ||
Expand Down Expand Up @@ -253,9 +253,9 @@ bool LstmOpBuilder::HasSupportedOutputsImpl(const Node& node,
int32_t Y_type = 0;
int32_t Y_h_type = 0;
int32_t Y_c_type = 0;
bool has_Y = output_defs.size() > 0 && output_defs[0]->Exists();
bool has_Y_h = output_defs.size() > 1 && output_defs[1]->Exists();
bool has_Y_c = output_defs.size() > 2 && output_defs[2]->Exists();
bool has_Y = TensorExists(output_defs, 0);
bool has_Y_h = TensorExists(output_defs, 1);
bool has_Y_c = TensorExists(output_defs, 2);

if (has_Y && GetType(*output_defs[0], Y_type, logger)) {
return IsDataTypeSupportedByOp(op_type, Y_type, wnn_limits, "outputs", "Y", logger);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder

const size_t bias_input_index = op_type == "SkipSimplifiedLayerNormalization" ? 3 : 2;
emscripten::val bias = emscripten::val::undefined();
if (input_defs.size() > bias_input_index && input_defs[bias_input_index]->Exists()) {
if (TensorExists(input_defs, bias_input_index)) {
// Bias input exists, and bias's shape should be the same as scale's shape.
std::vector<int64_t> bias_shape;
ORT_RETURN_IF_NOT(GetShape(*input_defs[bias_input_index], bias_shape, logger), "Cannot get bias shape");
Expand Down Expand Up @@ -153,7 +153,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
}

// SkipSimplifiedLayerNormalization's output input_skip_bias_sum is the sum of input, skip, and bias.
if (op_type == "SkipSimplifiedLayerNormalization" && output_defs.size() > 3 && output_defs[3]->Exists()) {
if (op_type == "SkipSimplifiedLayerNormalization" && TensorExists(output_defs, 3)) {
emscripten::val skip = model_builder.GetOperand(input_defs[1]->Name());
common_options.set("label", node.Name() + "_add_skip");
input_skip_bias_sum = model_builder.GetBuilder().call<emscripten::val>("add", input, skip, common_options);
Expand Down Expand Up @@ -243,12 +243,14 @@ bool NormalizationOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& initi

const auto& output_defs = node.OutputDefs();
if (op_type == "SkipSimplifiedLayerNormalization") {
for (size_t i = 1; i < output_defs.size(); i++) {
if (output_defs[i]->Exists() && i < 3) {
// Output mean and inv_std_var are used for training mode, which is not supported.
const auto output_name = i == 1 ? "mean" : "inv_std_var";
LOGS(logger, VERBOSE) << "SkipSimplifiedLayerNormalization's output: " << output_name << " is not supported.";
}
if (output_defs.size() > 4) {
LOGS(logger, VERBOSE) << "SkipSimplifiedLayerNormalization output count must not exceed 4.";
return false;
}
if (TensorExists(output_defs, 1) || TensorExists(output_defs, 2)) {
// Output mean and inv_std_var are used for training mode, which is not supported.
LOGS(logger, VERBOSE) << "SkipSimplifiedLayerNormalization's output mean and inv_std_var are not supported.";
return false;
}
} else {
if (output_defs.size() != 1) {
Expand All @@ -275,9 +277,9 @@ bool NormalizationOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet&
int32_t input2_type; // B data type
int32_t input3_type; // mean data type
int32_t input4_type; // var data type
bool has_input2 = input_defs.size() > 2 && input_defs[2]->Exists();
bool has_input3 = input_defs.size() > 3 && input_defs[3]->Exists();
bool has_input4 = input_defs.size() > 3 && input_defs[4]->Exists();
bool has_input2 = TensorExists(input_defs, 2);
bool has_input3 = TensorExists(input_defs, 3);
bool has_input4 = TensorExists(input_defs, 4);

if (!GetType(*input_defs[0], input0_type, logger) ||
!GetType(*input_defs[1], input1_type, logger) ||
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ Status QDQOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
emscripten::val scale = model_builder.GetOperand(input_defs[1]->Name());
emscripten::val zero_point = emscripten::val::null();

if (input_defs.size() == 3 && input_defs[2]->Exists()) {
if (TensorExists(input_defs, 2)) {
zero_point = model_builder.GetOperand(node.InputDefs()[2]->Name());
has_zero_point = true;
} else {
Expand Down Expand Up @@ -159,7 +159,7 @@ bool QDQOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet& /* initial
int32_t input0_type = 0; // input data type
int32_t input1_type = 0; // x_scale data type
int32_t input2_type = 0; // x_zero_point data type
bool has_input2 = input_defs.size() > 2 && input_defs[2]->Exists();
bool has_input2 = TensorExists(input_defs, 2);

if (!GetType(*input_defs[0], input0_type, logger) ||
!GetType(*input_defs[1], input1_type, logger) ||
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,7 @@ bool SliceOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet& initiali
return false;

// If there is step < 0, check data type support of reverse.
if (input_defs.size() > 4 && input_defs[4]->Exists()) {
if (TensorExists(input_defs, 4)) {
std::vector<int64_t> steps;
if (!ReadIntArrayFrom1DTensor(*initializers.at(input_defs[4]->Name()), steps, logger))
return false;
Expand Down

0 comments on commit 49801bd

Please sign in to comment.