From 337cc56d6f205ee5bb73c4aadad94f2e82fdadd4 Mon Sep 17 00:00:00 2001
From: Scott McKay
Date: Thu, 27 Jun 2024 02:54:36 +1000
Subject: [PATCH] Convert scalars to 1D to satisfy ML Program requirements. (#21159)

### Description
Convert scalars to 1D to satisfy ML Program requirements.

Failing CI run:
https://dev.azure.com/onnxruntime/onnxruntime/_build/results?buildId=1418617&view=logs&j=f7cc61a9-cc70-56e7-b06c-4668ca17e426&t=16d281b5-1bfd-5309-f274-36d0dffd9cb1&l=27167

### Motivation and Context
Fixes test failure in #17361
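For reference, here is a minimal standalone sketch of the shape mapping this change applies when building an ML Program: a rank-0 (scalar) ONNX shape becomes the 1-D shape {1}, and every other shape passes through unchanged. `GetMLProgramShape` is a hypothetical helper used only for illustration, and a plain `std::vector<int64_t>` stands in for the CoreML `MILSpec::TensorType` protobuf that the real code populates.

```cpp
// Standalone sketch (not ORT code): map an ONNX shape to the shape used for an
// ML Program tensor type. Scalars ({}) are represented as the 1-D shape {1}.
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int64_t> GetMLProgramShape(const std::vector<int64_t>& onnx_shape,
                                       bool convert_scalar = false) {
  if (convert_scalar && onnx_shape.empty()) {
    return {1};  // rank 0 -> rank 1 with a single element
  }
  return onnx_shape;  // non-scalar shapes are unchanged
}

int main() {
  const std::vector<int64_t> scalar{};      // ONNX scalar: rank 0
  const std::vector<int64_t> matrix{2, 3};  // ordinary 2-D tensor

  std::cout << GetMLProgramShape(scalar, /*convert_scalar*/ true).size() << "\n";  // prints 1
  std::cout << GetMLProgramShape(matrix, /*convert_scalar*/ true).size() << "\n";  // prints 2
  return 0;
}
```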
---
 .../coreml/builders/impl/builder_utils.cc | 48 ++++++++++++-------
 .../coreml/builders/impl/builder_utils.h  |  4 +-
 .../coreml/builders/model_builder.cc      |  7 +--
 3 files changed, 35 insertions(+), 24 deletions(-)

diff --git a/onnxruntime/core/providers/coreml/builders/impl/builder_utils.cc b/onnxruntime/core/providers/coreml/builders/impl/builder_utils.cc
index cbea969904ed5..2fcf9a1d7d9ba 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/builder_utils.cc
+++ b/onnxruntime/core/providers/coreml/builders/impl/builder_utils.cc
@@ -140,30 +140,44 @@ void CreateCoreMLWeight(CoreML::Specification::WeightParams& weight, gsl::span<
 void SetTensorTypeInfo(MILSpec::TensorType& tensor_type, MILSpec::DataType data_type,
-                       std::optional<gsl::span<const int64_t>> shape) {
+                       std::optional<gsl::span<const int64_t>> shape, bool convert_scalar = false) {
   tensor_type.set_datatype(data_type);
   if (shape) {
-    tensor_type.set_rank(shape->size());
-    for (const auto& dim : *shape) {
-      if (dim >= 0) {
-        tensor_type.add_dimensions()->mutable_constant()->set_size(narrow(dim));
-      } else {
-        tensor_type.add_dimensions()->mutable_unknown()->set_variadic(false);
+    auto rank = shape->size();
+    if (convert_scalar && rank == 0) {
+      // CoreML scalar has shape {1}
+      tensor_type.set_rank(1);
+      tensor_type.add_dimensions()->mutable_constant()->set_size(1);
+    } else {
+      tensor_type.set_rank(rank);
+      for (const auto& dim : *shape) {
+        if (dim >= 0) {
+          tensor_type.add_dimensions()->mutable_constant()->set_size(narrow(dim));
+        } else {
+          tensor_type.add_dimensions()->mutable_unknown()->set_variadic(false);
+        }
       }
     }
   }
 }
 
 void SetTensorTypeInfo(MILSpec::TensorType& tensor_type, MILSpec::DataType data_type,
-                       const ONNX_NAMESPACE::TensorShapeProto* shape) {
+                       const ONNX_NAMESPACE::TensorShapeProto* shape, bool convert_scalar = false) {
   tensor_type.set_datatype(data_type);
   if (shape) {
-    tensor_type.set_rank(shape->dim_size());
-    for (const auto& dim : shape->dim()) {
-      if (dim.has_dim_value()) {
-        tensor_type.add_dimensions()->mutable_constant()->set_size(narrow(dim.dim_value()));
-      } else {
-        tensor_type.add_dimensions()->mutable_unknown()->set_variadic(false);
+    auto rank = shape->dim_size();
+    if (convert_scalar && rank == 0) {
+      // CoreML scalar has shape {1}
+      tensor_type.set_rank(1);
+      tensor_type.add_dimensions()->mutable_constant()->set_size(1);
+    } else {
+      tensor_type.set_rank(rank);
+      for (const auto& dim : shape->dim()) {
+        if (dim.has_dim_value()) {
+          tensor_type.add_dimensions()->mutable_constant()->set_size(narrow(dim.dim_value()));
+        } else {
+          tensor_type.add_dimensions()->mutable_unknown()->set_variadic(false);
+        }
       }
     }
   }
@@ -281,13 +295,13 @@ template MILSpec::Value CreateScalarTensorValue(const int32_t& data);
 template MILSpec::Value CreateScalarTensorValue(const std::string& data);
 template MILSpec::Value CreateScalarTensorValue(const bool& data);
 
-COREML_SPEC::MILSpec::NamedValueType CreateNamedTensorValueType(const NodeArg& node_arg) {
+COREML_SPEC::MILSpec::NamedValueType CreateNamedTensorValueType(const NodeArg& node_arg, bool convert_scalar) {
   MILSpec::NamedValueType nvt;
   nvt.set_name(node_arg.Name());
   MILSpec::TensorType& tensor_type = *nvt.mutable_type()->mutable_tensortype();
 
   SetTensorTypeInfo(tensor_type, OnnxDataTypeToMILSpec(node_arg.TypeAsProto()->tensor_type().elem_type()),
-                    node_arg.Shape());
+                    node_arg.Shape(), convert_scalar);
 
   return nvt;
 }
@@ -308,7 +322,7 @@ void AddOperationOutput(COREML_SPEC::MILSpec::Operation& op, const NodeArg& outp
   MILSpec::TensorType& tensor_type = *value.mutable_tensortype();
 
   SetTensorTypeInfo(tensor_type, OnnxDataTypeToMILSpec(output.TypeAsProto()->tensor_type().elem_type()),
-                    output.Shape());
+                    output.Shape(), /*convert_scalar*/ true);
 }
 
 void AddPadTypeAndPads(COREML_SPEC::MILSpec::Operation& op, ModelBuilder& model_builder, std::string_view op_type,
diff --git a/onnxruntime/core/providers/coreml/builders/impl/builder_utils.h b/onnxruntime/core/providers/coreml/builders/impl/builder_utils.h
index 2804589065631..3e6c43ab07867 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/builder_utils.h
+++ b/onnxruntime/core/providers/coreml/builders/impl/builder_utils.h
@@ -114,8 +114,10 @@ template <typename T>
 COREML_SPEC::MILSpec::Value CreateScalarTensorValue(const T& data);
 
 /// Create a NamedValueType from an ONNX tensor NodeArg.
+/// <param name="node_arg">NodeArg to create NamedValueType from.</param>
+/// <param name="convert_scalar">If true, scalar shapes are converted to 1D.</param>
 /// Used to create inputs for the 'main' function in an ML Program.
-COREML_SPEC::MILSpec::NamedValueType CreateNamedTensorValueType(const NodeArg& node_arg);
+COREML_SPEC::MILSpec::NamedValueType CreateNamedTensorValueType(const NodeArg& node_arg, bool convert_scalar = false);
 
 /// <summary>
 /// Add an input argument to a MILSpec::Operation
diff --git a/onnxruntime/core/providers/coreml/builders/model_builder.cc b/onnxruntime/core/providers/coreml/builders/model_builder.cc
index eb4723a3b9746..88b518ab2289c 100644
--- a/onnxruntime/core/providers/coreml/builders/model_builder.cc
+++ b/onnxruntime/core/providers/coreml/builders/model_builder.cc
@@ -838,13 +838,8 @@ Status ModelBuilder::RegisterModelInputOutput(const NodeArg& node_arg, bool is_i
   if (create_ml_program_) {
     if (is_input) {
       // the model inputs need to be wired up as args to the 'main' function.
-      auto tensor_value_type = CreateNamedTensorValueType(node_arg);
+      auto tensor_value_type = CreateNamedTensorValueType(node_arg, /*convert_scalar*/ true);
       tensor_value_type.set_name(name);
-      if (node_arg.Shape()->dim_size() == 0) {
-        // update shape from {} to {1} (same change we made at the model input level above).
-        tensor_value_type.mutable_type()->mutable_tensortype()->set_rank(1);
-        tensor_value_type.mutable_type()->mutable_tensortype()->add_dimensions()->mutable_constant()->set_size(1);
-      }
 
       mlprogram_main_fn_->mutable_inputs()->Add(std::move(tensor_value_type));
     } else {