
Commit 331f9aa

Merge branch 'main' into carzh/export_for_inference_test

carzh committed Jul 30, 2024
2 parents 32e321d + 0d7cf30
Showing 76 changed files with 1,903 additions and 597 deletions.
3 changes: 0 additions & 3 deletions cmake/external/onnxruntime_external_deps.cmake
@@ -46,9 +46,6 @@ if (onnxruntime_BUILD_UNIT_TESTS)
if (CMAKE_SYSTEM_NAME STREQUAL "Emscripten")
set(gtest_disable_pthreads ON)
endif()
-if (${CMAKE_SYSTEM_NAME} MATCHES "AIX")
-set(gtest_disable_pthreads ON CACHE BOOL "gtest_disable_pthreads" FORCE)
-endif()
set(INSTALL_GTEST OFF CACHE BOOL "" FORCE)
if (IOS OR ANDROID)
# on mobile platforms the absl flags class dumps the flag names (assumably for binary size), which breaks passing
383 changes: 383 additions & 0 deletions cmake/patches/onnx/onnx.patch

Large diffs are not rendered by default.

@@ -8,7 +8,7 @@

<ItemGroup>
<PackageReference Include="Microsoft.ML.OnnxRuntime" Version="1.16.3" />
-<PackageReference Include="Sixlabors.ImageSharp" Version="2.1.8" />
+<PackageReference Include="Sixlabors.ImageSharp" Version="2.1.9" />
</ItemGroup>

</Project>
67 changes: 39 additions & 28 deletions csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj
@@ -23,14 +23,51 @@
</PropertyGroup>

<PropertyGroup Condition="'$(OrtPackageId)' == 'Microsoft.ML.OnnxRuntime.Training' AND
-'$(IncludeMobileTargets)' == 'true'">
+'$(IncludeMobileTargets)' == 'true'">
<MobileTargets>net8.0-android</MobileTargets>
</PropertyGroup>

<PropertyGroup>
<TargetFrameworks>$(BaseTargets);$(MobileTargets)</TargetFrameworks>
</PropertyGroup>

+<PropertyGroup>
+<RootNamespace>Microsoft.ML.OnnxRuntime</RootNamespace>
+<AssemblyName>Microsoft.ML.OnnxRuntime</AssemblyName>
+
+<!-- Ignore any passed in value unless this is a release build. -->
+<PackageVersion Condition="'$(IsReleaseBuild)' != 'true'"></PackageVersion>
+<PackageVersion Condition="'$(PackageVersion)' == '' And '$(Configuration)' == 'Debug'">1.0.0</PackageVersion>
+<PackageVersion Condition="'$(PackageVersion)' == '' ">0.0.0</PackageVersion>
+
+<!-- Set the attributes for the managed dll -->
+<!-- https://learn.microsoft.com/en-us/dotnet/standard/assembly/set-attributes-project-file -->
+<GenerateAssemblyInfo>true</GenerateAssemblyInfo>
+<AssemblyTitle>Microsoft.ML.OnnxRuntime C# Bindings</AssemblyTitle>
+<Company>Microsoft</Company>
+<Copyright>© Microsoft Corporation. All rights reserved.</Copyright>
+<Description>This package contains ONNX Runtime for .Net platforms</Description>
+
+<!-- NOTE: this is also used as the default for AssemblyVersion and FileVersion -->
+<Version>$(PackageVersion)</Version>
+
+<!-- Set the attributes for a nuget package -->
+<!--- The package name is always hardcoded as the package created by this project only contains managed assemblies -->
+<!--- The parameter OrtPackageId is only used for some conditional logic below -->
+<Authors>Microsoft</Authors>
+<PackageId>Microsoft.ML.OnnxRuntime.Managed</PackageId>
+<PackageTags>ONNX;ONNX Runtime;Machine Learning</PackageTags>
+<PackageProjectUrl>https://github.com/Microsoft/onnxruntime</PackageProjectUrl>
+<PackageLicenseFile>LICENSE.txt</PackageLicenseFile>
+<PackageIcon>ORT_icon_for_light_bg.png</PackageIcon>
+<PackageReleaseNotes>
+Release Def:
+Branch: $(BUILD_SOURCEBRANCH)
+Commit: $(BUILD_SOURCEVERSION)
+Build: https://aiinfra.visualstudio.com/Lotus/_build/results?buildId=$(BUILD_BUILDID)
+</PackageReleaseNotes>
+</PropertyGroup>
+
<PropertyGroup>
<Platforms>AnyCPU;x86</Platforms>
<LangVersion>default</LangVersion>
@@ -43,8 +80,6 @@
<OnnxRuntimeCsharpRoot>$(OnnxRuntimeRoot)\csharp</OnnxRuntimeCsharpRoot>
<TargetArchitecture Condition=" '$(TargetArchitecture)' == '' ">x64</TargetArchitecture>

-<RootNamespace>Microsoft.ML.OnnxRuntime</RootNamespace>
-<AssemblyName>Microsoft.ML.OnnxRuntime</AssemblyName>
<EnableDefaultItems>false</EnableDefaultItems>
<EnableDefaultCompileItems>false</EnableDefaultCompileItems>
<DebugType>portable</DebugType>
@@ -54,35 +89,15 @@
on their device is not built for training, an exception will be thrown with the following message -
"Training is disabled in the current build. Please build onnxruntime from source with the build flags
enable_training_apis. "-->
-<EnableTrainingApis Condition="'$(EnableTrainingApis)' == ''">true</EnableTrainingApis>
+<EnableTrainingApis Condition="'$(EnableTrainingApis)' == ''">true</EnableTrainingApis>

-<!--- The package name is always hardcoded as the package created by this project only contains managed assemblies -->
-<!--- The parameter OrtPackageId is only used for some conditional logic below -->
-<PackageId>Microsoft.ML.OnnxRuntime.Managed</PackageId>
-<Authors>Microsoft</Authors>
-<PackageVersion Condition=" '$(PackageVersion)' == '' And '$(Configuration)' == 'Debug' ">1.0.0</PackageVersion>
-<PackageVersion Condition=" '$(PackageVersion)' == '' ">0.0.0</PackageVersion>
-<Version>$(PackageVersion)</Version>
-<Description>This package contains ONNX Runtime for .Net platforms</Description>
-<PackageTags>ONNX;ONNX Runtime;Machine Learning</PackageTags>
-<PackageProjectUrl>https://github.com/Microsoft/onnxruntime</PackageProjectUrl>
-<Copyright>© Microsoft Corporation. All rights reserved.</Copyright>
-<PackageLicenseFile>LICENSE.txt</PackageLicenseFile>
-<PackageIcon>ORT_icon_for_light_bg.png</PackageIcon>
-<PackageReleaseNotes>
-Release Def:
-Branch: $(BUILD_SOURCEBRANCH)
-Commit: $(BUILD_SOURCEVERSION)
-Build: https://aiinfra.visualstudio.com/Lotus/_build/results?buildId=$(BUILD_BUILDID)
-</PackageReleaseNotes>
<!-- sourcelink flags -->
<PublishRepositoryUrl>true</PublishRepositoryUrl>

<!-- Optional: Embed source files that are not tracked by the source control manager in the PDB -->
<!--EmbedUntrackedSources>true</EmbedUntrackedSources-->

<GenerateTargetFrameworkAttribute>false</GenerateTargetFrameworkAttribute>
-<GenerateAssemblyInfo>false</GenerateAssemblyInfo>
<AllowedOutputExtensionsInPackageBuildOutputFolder>$(AllowedOutputExtensionsInPackageBuildOutputFolder);.pdb</AllowedOutputExtensionsInPackageBuildOutputFolder>
<Configurations>Debug;Release;RelWithDebInfo</Configurations>

@@ -158,10 +173,6 @@
<OrtConstants>$(OrtConstants);__ENABLE_COREML__</OrtConstants>
</PropertyGroup>

-<PropertyGroup Condition="'$(IsXamarinTarget)'=='true'">
-<OrtConstants>$(OrtConstants);__XAMARIN__</OrtConstants>
-</PropertyGroup>

<PropertyGroup>
<DefineConstants>$(DefineConstants);$(OrtConstants)</DefineConstants>
</PropertyGroup>
2 changes: 1 addition & 1 deletion include/onnxruntime/core/session/onnxruntime_cxx_api.h
@@ -2216,7 +2216,7 @@ struct ShapeInferContext {

size_t GetInputCount() const { return input_shapes_.size(); }

-Status SetOutputShape(size_t indice, const Shape& shape);
+Status SetOutputShape(size_t indice, const Shape& shape, ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT);

int64_t GetAttrInt(const char* attr_name);

3 changes: 2 additions & 1 deletion include/onnxruntime/core/session/onnxruntime_cxx_inline.h
@@ -1998,9 +1998,10 @@ inline ShapeInferContext::ShapeInferContext(const OrtApi* ort_api,
}
}

-inline Status ShapeInferContext::SetOutputShape(size_t indice, const Shape& shape) {
+inline Status ShapeInferContext::SetOutputShape(size_t indice, const Shape& shape, ONNXTensorElementDataType type) {
OrtTensorTypeAndShapeInfo* info = {};
ORT_CXX_RETURN_ON_API_FAIL(ort_api_->CreateTensorTypeAndShapeInfo(&info));
+ORT_CXX_RETURN_ON_API_FAIL(ort_api_->SetTensorElementType(info, type));

using InfoPtr = std::unique_ptr<OrtTensorTypeAndShapeInfo, std::function<void(OrtTensorTypeAndShapeInfo*)>>;

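Together with the declaration change in onnxruntime_cxx_api.h above, custom-op shape inference can now pin the output element type instead of inheriting the float default. A minimal sketch under that reading; the op and function name are hypothetical, while SetOutputShape, GetInputShape, and the float default come from the headers in this diff:

// Hypothetical shape-inference function for a custom op whose output is
// int64 (e.g. an argmax-like op); only the SetOutputShape call is new here.
Ort::Status InferArgMaxLikeShape(Ort::ShapeInferContext& ctx) {
  const auto& in_shape = ctx.GetInputShape(0);  // propagate input 0's shape
  // The third argument is the new parameter; omitting it keeps the previous
  // behavior of ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT.
  return ctx.SetOutputShape(0, in_shape, ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64);
}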
@@ -265,6 +265,10 @@ static const char* const kOrtSessionOptionEpContextFilePath = "ep.context_file_p
// "1": dump the EP context into the Onnx model. (default).
static const char* const kOrtSessionOptionEpContextEmbedMode = "ep.context_embed_mode";

+// Specify the EPContext node name prefix to make it unique
+// in case user need to merge/connect multiple EPContext nodes in one model
+static const char* const kOrtSessionOptionEpContextNodeNamePrefix = "ep.context_node_name_prefix";

// Gemm fastmath mode provides fp32 gemm acceleration with bfloat16 based matmul.
// Option values:
// - "0": Gemm FastMath mode is not enabled. [DEFAULT]
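The new key is consumed through the regular session-option mechanism. A minimal C++ sketch of how it might be set; the prefix value and model path are placeholders, while AddConfigEntry and the two "ep.context_*" keys come from the ONNX Runtime headers:

#include "onnxruntime_cxx_api.h"

int main() {
  Ort::Env env;
  Ort::SessionOptions so;
  // Dump the EP context model and give its EPContext nodes a unique prefix
  // so several such models can later be merged without name collisions.
  so.AddConfigEntry("ep.context_enable", "1");
  so.AddConfigEntry("ep.context_node_name_prefix", "part_a_");  // new key above
  Ort::Session session(env, ORT_TSTR("model_a.onnx"), so);      // placeholder path
  return 0;
}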
16 changes: 7 additions & 9 deletions js/node/src/tensor_helper.cc
@@ -38,13 +38,13 @@ constexpr size_t DATA_TYPE_ELEMENT_SIZE_MAP[] = {
2, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16
2, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16
4, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32
-8, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64 INT64 not working in Javascript
+8, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64
0, // ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING N/A
1, // ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL
-0, // ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16 FLOAT16 not working in Javascript
+2, // ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16
8, // ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE
4, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32
-8, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64 UINT64 not working in Javascript
+8, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64
0, // ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64 not supported
0, // ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128 not supported
0 // ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16 not supported
@@ -60,13 +60,13 @@ constexpr napi_typedarray_type DATA_TYPE_TYPEDARRAY_MAP[] = {
napi_uint16_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16
napi_int16_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16
napi_int32_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32
-napi_bigint64_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64 INT64 not working i
+napi_bigint64_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64
(napi_typedarray_type)(-1), // ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING not supported
napi_uint8_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL
-(napi_typedarray_type)(-1), // ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16 FLOAT16 not working
+napi_uint16_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16 FLOAT16 uses Uint16Array
napi_float64_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE
napi_uint32_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32
-napi_biguint64_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64 UINT64 not working
+napi_biguint64_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64
(napi_typedarray_type)(-1), // ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64 not supported
(napi_typedarray_type)(-1), // ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128 not supported
(napi_typedarray_type)(-1) // ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16 not supported
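
Both tables are indexed by the numeric value of the ONNXTensorElementDataType enum. A small illustrative helper (the function name is invented) showing the lookup and the effect of the FLOAT16 change:

#include <utility>

// Invented helper: map an ONNX element type to its byte size and N-API view.
std::pair<size_t, napi_typedarray_type> JsTypeFor(ONNXTensorElementDataType t) {
  const size_t i = static_cast<size_t>(t);
  return {DATA_TYPE_ELEMENT_SIZE_MAP[i], DATA_TYPE_TYPEDARRAY_MAP[i]};
}
// After this change, FLOAT16 yields {2, napi_uint16_array}: JavaScript has no
// standard Float16Array, so half-precision values travel as raw 16-bit words.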
@@ -182,9 +182,7 @@ Ort::Value NapiValueToOrtValue(Napi::Env env, Napi::Value value, OrtMemoryInfo *

char *buffer = reinterpret_cast<char *>(tensorDataTypedArray.ArrayBuffer().Data());
size_t bufferByteOffset = tensorDataTypedArray.ByteOffset();
-// there is a bug in TypedArray::ElementSize(): https://github.com/nodejs/node-addon-api/pull/705
-// TODO: change to TypedArray::ByteLength() in next node-addon-api release.
-size_t bufferByteLength = tensorDataTypedArray.ElementLength() * DATA_TYPE_ELEMENT_SIZE_MAP[elemType];
+size_t bufferByteLength = tensorDataTypedArray.ByteLength();
return Ort::Value::CreateTensor(memory_info, buffer + bufferByteOffset, bufferByteLength,
dims.empty() ? nullptr : &dims[0], dims.size(), elemType);
}
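The deleted workaround dates from a node-addon-api bug in TypedArray::ElementSize() (nodejs/node-addon-api#705); once that fix shipped, ByteLength() already accounts for the element size. A short sketch of the invariant the new line relies on, reusing the names from the function above:

// With a fixed node-addon-api, for any Napi::TypedArray the identity
//   ByteLength() == ElementLength() * ElementSize()
// holds, so the manual DATA_TYPE_ELEMENT_SIZE_MAP multiplication is obsolete.
size_t byteLength = tensorDataTypedArray.ByteLength();
size_t byteOffset = tensorDataTypedArray.ByteOffset();
// The created tensor reads exactly byteLength bytes starting at
// ArrayBuffer().Data() + byteOffset.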
Diffs for the remaining changed files were not loaded.
