diff --git a/cmake/onnxruntime_providers_coreml.cmake b/cmake/onnxruntime_providers_coreml.cmake
index f7d6d0ba766dd..18508605726ef 100644
--- a/cmake/onnxruntime_providers_coreml.cmake
+++ b/cmake/onnxruntime_providers_coreml.cmake
@@ -191,11 +191,11 @@ endif()
 
 if (APPLE)
   target_compile_definitions(onnxruntime_providers_coreml PRIVATE __REALLY_IS_APPLE__)
-else()
-  # TEMP test
-  target_compile_definitions(onnxruntime_providers_coreml PRIVATE __APPLE__OR__TEST__)
 endif()
 
+# TEMP test
+target_compile_definitions(onnxruntime_providers_coreml PRIVATE __APPLE__OR__TEST__)
+
 if (_BUILD_COREMLTOOLS)
   # copied from external/xnnpack.cmake
   #
diff --git a/onnxruntime/core/providers/coreml/builders/impl/builder_utils.cc b/onnxruntime/core/providers/coreml/builders/impl/builder_utils.cc
index 5be7e7c93da62..9c03b29a7817b 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/builder_utils.cc
+++ b/onnxruntime/core/providers/coreml/builders/impl/builder_utils.cc
@@ -164,9 +164,11 @@ void SetTensorTypeInfo(MILSpec::TensorType& tensor_type, MILSpec::DataType data_
 }
 
 template <typename T1, typename T2>
-void CopyDataToTensorValue(MILSpec::TensorValue& tensor_value, const gsl::span<const T1>& data) {
-  static_assert(false, "Unsupported data type");  // add specializations below as needed
+void CopyDataToTensorValue(MILSpec::TensorValue& tensor_value, const gsl::span<const T1> data) {
+  // need a 'false' that is dependent on the template types to make gcc happy and give a meaningful error message.
+  static_assert(false_for_T<T1> && false_for_T<T2>, "Unsupported data type");  // add specializations below as needed
 }
+
 template <>
 void CopyDataToTensorValue<float, float>(MILSpec::TensorValue& tensor_value, const gsl::span<const float>& data) {
   tensor_value.mutable_floats()->mutable_values()->Add(data.begin(), data.end());
diff --git a/onnxruntime/core/providers/coreml/builders/model_builder.cc b/onnxruntime/core/providers/coreml/builders/model_builder.cc
index 089e3d6d9cc4b..e4870af991948 100644
--- a/onnxruntime/core/providers/coreml/builders/model_builder.cc
+++ b/onnxruntime/core/providers/coreml/builders/model_builder.cc
@@ -442,9 +442,10 @@ void ModelBuilder::AddLayer(std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer) {
 * ML Program related helpers
 */
 
-std::unique_ptr<COREML_SPEC::MILSpec::Operation> ModelBuilder::CreateOperation(const Node& node, std::string_view op_type,
+std::unique_ptr<COREML_SPEC::MILSpec::Operation> ModelBuilder::CreateOperation(const Node& node,
+                                                                               std::string_view op_type,
                                                                                std::string_view suffix) {
-  auto operation_name = GetUniqueName(node, suffix);
+  std::string operation_name = GetUniqueName(node, suffix);
 
   std::unique_ptr<COREML_SPEC::MILSpec::Operation> op = std::make_unique<COREML_SPEC::MILSpec::Operation>();
   op->set_type(std::string(op_type));
diff --git a/onnxruntime/core/providers/coreml/coreml_execution_provider.h b/onnxruntime/core/providers/coreml/coreml_execution_provider.h
index 641e5afe1023e..59eb5375bf379 100644
--- a/onnxruntime/core/providers/coreml/coreml_execution_provider.h
+++ b/onnxruntime/core/providers/coreml/coreml_execution_provider.h
@@ -25,11 +25,11 @@ class CoreMLExecutionProvider : public IExecutionProvider {
                  std::vector<NodeComputeInfo>& node_compute_funcs) override;
 #endif
 
+ private:
   // The bit flags which define bool options for COREML EP, bits are defined as
   // COREMLFlags in include/onnxruntime/core/providers/coreml/coreml_provider_factory.h
   const uint32_t coreml_flags_;
 
- private:
   const int32_t coreml_version_;
   // <fused_node_name, CoreML model>
 #ifdef __APPLE__OR__TEST__
diff --git a/onnxruntime/core/providers/coreml/model/model_stub.cc b/onnxruntime/core/providers/coreml/model/model_stub.cc
index 824d6cb872c2a..fdf9583df1472 100644
--- a/onnxruntime/core/providers/coreml/model/model_stub.cc
+++ b/onnxruntime/core/providers/coreml/model/model_stub.cc
@@ -16,7 +16,8 @@ Model::Model(const std::string& /*path*/,
     : execution_(std::make_unique<Execution>()) {
 }
 
-Model::~Model() {}
+Model::~Model() {
+}
 
 Status Model::LoadModel() {
   return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Loading a CoreML model is not supported on this platform.");