[CoreML] Adapt to MLMultiArray.dataPointer deprecation #17726

Merged · 15 commits · Nov 18, 2023
Changes from 8 commits
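For context, `MLMultiArray.dataPointer` is deprecated in favor of the handler-based accessors, which only guarantee the buffer pointer for the duration of the handler block. A minimal sketch of the migration pattern (the `array` variable here is hypothetical, not from this diff):

// Deprecated: direct pointer access, assumes a stable contiguous buffer.
// const void* p = array.dataPointer;

// Replacement: `bytes` is only guaranteed valid inside the handler.
[array getBytesWithHandler:^(const void* bytes, NSInteger size) {
  // Read from `bytes`, or copy it out, before the handler returns.
}];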
24 changes: 22 additions & 2 deletions onnxruntime/core/providers/coreml/model/model.mm
@@ -169,6 +169,14 @@ Status CreateInputFeatureProvider(const std::unordered_map<std::string, OnnxTensorData
conversion_buffers_out = std::move(conversion_buffers);
return Status::OK();
}

// Treats the MLMultiArray as contiguous if the stride of the first (batch)
// dimension equals the number of elements in one batch, i.e. the product of
// the remaining dimensions.
bool IsArrayContiguous(MLMultiArray* array) {
  int64_t batch_stride = [array.strides[0] longLongValue];
  const auto* shape = array.shape;
  int64_t batch_elems = 1;
  for (unsigned long i = 1; i < [shape count]; i++) {
    batch_elems *= [shape[i] longLongValue];
  }
  return batch_stride == batch_elems;
}
} // namespace

NS_ASSUME_NONNULL_BEGIN
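The check above inspects only the outermost stride; a stricter variant would verify every stride against a dense row-major layout. A hedged sketch of such a check (the name `IsArrayFullyContiguous` is assumed, not part of this PR):

// Hypothetical stricter check: every stride must match a dense row-major layout.
bool IsArrayFullyContiguous(MLMultiArray* array) {
  int64_t expected_stride = 1;
  for (NSInteger i = (NSInteger)[array.shape count] - 1; i >= 0; --i) {
    if ([array.strides[i] longLongValue] != expected_stride) {
      return false;
    }
    expected_stride *= [array.shape[i] longLongValue];
  }
  return true;
}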
@@ -324,7 +332,14 @@ - (Status)predict:(const std::unordered_map<std::string, OnnxTensorData>&)inputs
") do not match");
}

const void* model_output_buffer = data.dataPointer;
ORT_RETURN_IF_NOT(IsArrayContiguous(data),
"Non-contiguous output MLMultiArray is not currently supported");
__block const void* model_output_buffer = nullptr;
__block int64_t coreml_buffer_size = 0;
[data getBytesWithHandler:^(const void* bytes, NSInteger size) {
  model_output_buffer = bytes;
  coreml_buffer_size = size;
}];

if (model_output_buffer == nullptr) {
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "model_output_buffer has no data for ", output_name);
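Apple documents the `bytes` pointer as valid only within the handler block, so capturing it through `__block` and dereferencing it after the call relies on the backing buffer outliving the handler. A more conservative pattern copies inside the block; a sketch only, with `output_buffer` and `output_data_byte_size` assumed to be computed as in the hunks below:

__block bool copy_ok = false;
[data getBytesWithHandler:^(const void* bytes, NSInteger size) {
  // Copy while the pointer is guaranteed valid; validate the size first.
  if (static_cast<size_t>(size) == output_data_byte_size) {
    memcpy(output_buffer, bytes, output_data_byte_size);
    copy_ok = true;
  }
}];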
@@ -334,11 +349,15 @@ - (Status)predict:(const std::unordered_map<std::string, OnnxTensorData>&)inputs
switch (onnx_data_type) {
case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: {
const auto output_data_byte_size = num_elements * sizeof(float);
ORT_RETURN_IF_NOT(coreml_buffer_size == output_data_byte_size,
"CoreML output buffer size and expected output size differ");
memcpy(output_buffer, model_output_buffer, output_data_byte_size);
break;
}
case ONNX_NAMESPACE::TensorProto_DataType_INT32: {
const auto output_data_byte_size = num_elements * sizeof(int32_t);
ORT_RETURN_IF_NOT(coreml_buffer_size == output_data_byte_size,
"CoreML output buffer size and expected output size differ");
memcpy(output_buffer, model_output_buffer, output_data_byte_size);
break;
}
@@ -348,7 +367,8 @@ - (Status)predict:(const std::unordered_map<std::string, OnnxTensorData>&)inputs
case ONNX_NAMESPACE::TensorProto_DataType_INT64: {
ORT_RETURN_IF_NOT(data.dataType == MLMultiArrayDataTypeInt32,
"CoreML output data type is not MLMultiArrayDataTypeInt32");

ORT_RETURN_IF_NOT(coreml_buffer_size == num_elements * sizeof(int32_t),
"CoreML output buffer size and expected output size differ");
const auto model_output_span = gsl::span{static_cast<const int32_t*>(model_output_buffer), num_elements};
const auto output_span = gsl::span{static_cast<int64_t*>(output_buffer), num_elements};
std::transform(model_output_span.begin(), model_output_span.end(), output_span.begin(),
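The diff is truncated at this point; the transform presumably widens each int32 element to int64, along the lines of this assumed continuation:

// Assumed continuation of the truncated std::transform call: widen int32 -> int64.
[](int32_t v) { return static_cast<int64_t>(v); });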