
Commit

Merge branch 'main' into pad
xhcao committed Dec 20, 2024
2 parents dbe430f + 4aca8f3 commit 491e597
Showing 96 changed files with 1,212 additions and 916 deletions.
2 changes: 1 addition & 1 deletion cgmanifests/generated/cgmanifest.json
@@ -196,7 +196,7 @@
"component": {
"type": "git",
"git": {
"commitHash": "bc0d2e35909b8456abe32f3b30a49bb0c125e8b7",
"commitHash": "9c69a24bc2e20c8a511a4e6b06fd49639ec5300a",
"repositoryUrl": "https://github.com/onnx/onnx-tensorrt.git"
},
"comments": "onnx_tensorrt"
4 changes: 2 additions & 2 deletions cmake/deps.txt
@@ -36,8 +36,8 @@ microsoft_wil;https://github.com/microsoft/wil/archive/refs/tags/v1.0.230629.1.zip
mimalloc;https://github.com/microsoft/mimalloc/archive/refs/tags/v2.1.1.zip;d5ee7d34223d0567892db5179849939c8769dc41
mp11;https://github.com/boostorg/mp11/archive/refs/tags/boost-1.82.0.zip;9bc9e01dffb64d9e0773b2e44d2f22c51aace063
onnx;https://github.com/onnx/onnx/archive/refs/tags/v1.16.1.zip;2eb9198bb352757d5ff13977cbe0634898e0837c
# Use the latest commit of 10.6-GA-ORT-DDS
onnx_tensorrt;https://github.com/onnx/onnx-tensorrt/archive/bc0d2e35909b8456abe32f3b30a49bb0c125e8b7.zip;f233ae871ad82c023da62e5dd620639f00bc2d15
# Use the latest commit of 10.7-GA
onnx_tensorrt;https://github.com/onnx/onnx-tensorrt/archive/9c69a24bc2e20c8a511a4e6b06fd49639ec5300a.zip;ff1fe9af78eb129b4a4cdcb7450b7390b4436dd3
protobuf;https://github.com/protocolbuffers/protobuf/archive/refs/tags/v21.12.zip;7cf2733949036c7d52fda017badcab093fe73bfa
protoc_win64;https://github.com/protocolbuffers/protobuf/releases/download/v21.12/protoc-21.12-win64.zip;b4521f7ada5b260380f94c4bd7f1b7684c76969a
protoc_win32;https://github.com/protocolbuffers/protobuf/releases/download/v21.12/protoc-21.12-win32.zip;3688010318192c46ce73213cdfb6b3e5656da874
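Each deps.txt entry is a single name;URL;SHA1 line, so bumping onnx-tensorrt to the 10.7-GA commit means changing the archive URL and its checksum together. A minimal sketch of how such an entry could be consumed via CMake's FetchContent — purely illustrative; the repository's actual dependency helper differs:

    # Hypothetical sketch, not the repository's real helper code.
    include(FetchContent)
    file(STRINGS "${CMAKE_SOURCE_DIR}/deps.txt" _dep_lines)
    foreach(_line IN LISTS _dep_lines)
      # Skip comment lines; real entries are "name;url;sha1".
      if(_line MATCHES "^onnx_tensorrt;([^;]+);([0-9a-fA-F]+)$")
        FetchContent_Declare(onnx_tensorrt
          URL      "${CMAKE_MATCH_1}"
          URL_HASH "SHA1=${CMAKE_MATCH_2}")  # download fails if the hash drifts
        FetchContent_MakeAvailable(onnx_tensorrt)
      endif()
    endforeach()

Pinning by SHA1 means a URL swap without a matching hash update fails loudly at configure time.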
1 change: 1 addition & 0 deletions cmake/onnxruntime.cmake
@@ -77,6 +77,7 @@ if(WIN32)
onnxruntime_add_shared_library(onnxruntime
${SYMBOL_FILE}
"${ONNXRUNTIME_ROOT}/core/dll/dllmain.cc"
"${ONNXRUNTIME_ROOT}/core/dll/delay_load_hook.cc"
"${ONNXRUNTIME_ROOT}/core/dll/onnxruntime.rc"
)
elseif(onnxruntime_BUILD_APPLE_FRAMEWORK)
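delay_load_hook.cc is newly linked into onnxruntime.dll alongside dllmain.cc. For orientation, this is the MSVC delay-load hook mechanism such a file plugs into; the body below is an assumed illustration, not the commit's actual implementation:

    // Illustrative sketch of an MSVC delay-load notification hook.
    #include <Windows.h>
    #include <delayimp.h>

    static FARPROC WINAPI OrtDelayLoadHook(unsigned dliNotify, PDelayLoadInfo pdli) {
      if (dliNotify == dliNotePreLoadLibrary) {
        // A real hook could resolve delayed DLLs (e.g. webgpu_dawn.dll or
        // DirectML.dll) from the directory containing onnxruntime.dll rather
        // than the process's default search path.
      }
      return nullptr;  // nullptr = fall through to default delay-load behavior
    }

    // The delay-load runtime picks up this well-known symbol when /DELAYLOAD is used.
    extern "C" const PfnDliHook __pfnDliNotifyHook2 = OrtDelayLoadHook;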
20 changes: 17 additions & 3 deletions cmake/onnxruntime_nodejs.cmake
@@ -60,15 +60,26 @@ else()
endif()
endif()

# a list of DLLs that the Node.js binding depends on
set(NODEJS_DLL_DEPS)

# setup providers
if (onnxruntime_USE_CUDA)
set(NODEJS_BINDING_USE_CUDA "--use_cuda")
endif()
if (onnxruntime_USE_DML)
set(NODEJS_BINDING_USE_DML "--use_dml")
list(APPEND NODEJS_DLL_DEPS "$<TARGET_FILE_DIR:onnxruntime>/DirectML.dll")
endif()
if (onnxruntime_USE_WEBGPU)
set(NODEJS_BINDING_USE_WEBGPU "--use_webgpu")
if (WIN32 AND onnxruntime_ENABLE_DAWN_BACKEND_D3D12)
list(APPEND NODEJS_DLL_DEPS "$<TARGET_FILE_DIR:dxcompiler>/dxil.dll")
list(APPEND NODEJS_DLL_DEPS "$<TARGET_FILE_DIR:dxcompiler>/dxcompiler.dll")
endif()
if (onnxruntime_BUILD_DAWN_MONOLITHIC_LIBRARY)
list(APPEND NODEJS_DLL_DEPS "$<TARGET_FILE:dawn::webgpu_dawn>")
endif()
endif()
if (onnxruntime_USE_TENSORRT)
set(NODEJS_BINDING_USE_TENSORRT "--use_tensorrt")
@@ -94,9 +105,12 @@ add_custom_target(js_common_npm_ci ALL

add_custom_target(nodejs_binding_wrapper ALL
COMMAND ${NPM_CLI} ci
COMMAND ${NPM_CLI} run build -- --onnxruntime-build-dir=${CMAKE_CURRENT_BINARY_DIR} --config=${CMAKE_BUILD_TYPE} --onnxruntime-generator=${CMAKE_GENERATOR}
--arch=${NODEJS_BINDING_ARCH} ${NODEJS_BINDING_USE_CUDA} ${NODEJS_BINDING_USE_DML} ${NODEJS_BINDING_USE_WEBGPU} ${NODEJS_BINDING_USE_TENSORRT}
${NODEJS_BINDING_USE_COREML} ${NODEJS_BINDING_USE_QNN}
COMMAND ${NPM_CLI} run build -- "--onnxruntime-build-dir=${CMAKE_CURRENT_BINARY_DIR}"
--config=${CMAKE_BUILD_TYPE}
"--onnxruntime-generator=${CMAKE_GENERATOR}"
"--dll_deps=${NODEJS_DLL_DEPS}"
--arch=${NODEJS_BINDING_ARCH} ${NODEJS_BINDING_USE_CUDA} ${NODEJS_BINDING_USE_DML} ${NODEJS_BINDING_USE_WEBGPU}
${NODEJS_BINDING_USE_TENSORRT} ${NODEJS_BINDING_USE_COREML} ${NODEJS_BINDING_USE_QNN}
WORKING_DIRECTORY ${JS_NODE_ROOT}
COMMENT "Using cmake-js to build OnnxRuntime Node.js binding")

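Since NODEJS_DLL_DEPS is a CMake list, the quoted "--dll_deps=${NODEJS_DLL_DEPS}" argument receives the entries joined by semicolons, with the generator expressions resolved at build time. The final argument might look like this (paths purely illustrative):

    --dll_deps=C:/build/Release/DirectML.dll;C:/build/_deps/dxc-build/bin/dxil.dll;C:/build/_deps/dxc-build/bin/dxcompiler.dll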
36 changes: 27 additions & 9 deletions cmake/onnxruntime_providers_webgpu.cmake
@@ -23,24 +23,42 @@
onnxruntime_add_include_to_target(onnxruntime_providers_webgpu
onnxruntime_common dawn::dawncpp_headers dawn::dawn_headers onnx onnx_proto flatbuffers::flatbuffers Boost::mp11 safeint_interface)

set(onnxruntime_providers_webgpu_dll_deps)

if (onnxruntime_BUILD_DAWN_MONOLITHIC_LIBRARY)
target_link_libraries(onnxruntime_providers_webgpu dawn::webgpu_dawn)

if (onnxruntime_ENABLE_DELAY_LOADING_WIN_DLLS)
list(APPEND onnxruntime_DELAYLOAD_FLAGS "/DELAYLOAD:webgpu_dawn.dll")
endif()
if (WIN32)
if (onnxruntime_ENABLE_DELAY_LOADING_WIN_DLLS)
list(APPEND onnxruntime_DELAYLOAD_FLAGS "/DELAYLOAD:webgpu_dawn.dll")
endif()

# Copy webgpu_dawn.dll to the output directory
add_custom_command(
TARGET onnxruntime_providers_webgpu
POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy_if_different "$<TARGET_FILE:dawn::webgpu_dawn>" "$<TARGET_FILE_DIR:onnxruntime_providers_webgpu>"
VERBATIM )
list(APPEND onnxruntime_providers_webgpu_dll_deps "$<TARGET_FILE:dawn::webgpu_dawn>")
endif()
else()
if (NOT onnxruntime_USE_EXTERNAL_DAWN)
target_link_libraries(onnxruntime_providers_webgpu dawn::dawn_native)
endif()
target_link_libraries(onnxruntime_providers_webgpu dawn::dawn_proc)
endif()

if (WIN32 AND onnxruntime_ENABLE_DAWN_BACKEND_D3D12)
# Ensure dxil.dll and dxcompiler.dll exist in the output directory $<TARGET_FILE_DIR:dxcompiler>
add_dependencies(onnxruntime_providers_webgpu copy_dxil_dll)
add_dependencies(onnxruntime_providers_webgpu dxcompiler)

list(APPEND onnxruntime_providers_webgpu_dll_deps "$<TARGET_FILE_DIR:dxcompiler>/dxil.dll")
list(APPEND onnxruntime_providers_webgpu_dll_deps "$<TARGET_FILE_DIR:dxcompiler>/dxcompiler.dll")
endif()

if (onnxruntime_providers_webgpu_dll_deps)
# Copy dependency DLLs to the output directory
add_custom_command(
TARGET onnxruntime_providers_webgpu
POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy_if_different "${onnxruntime_providers_webgpu_dll_deps}" "$<TARGET_FILE_DIR:onnxruntime_providers_webgpu>"
COMMAND_EXPAND_LISTS
VERBATIM )
endif()

set_target_properties(onnxruntime_providers_webgpu PROPERTIES FOLDER "ONNXRuntime")
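The consolidated copy step depends on COMMAND_EXPAND_LISTS: without it, the quoted list would reach copy_if_different as one semicolon-joined argument instead of one path per file. A minimal standalone sketch of the difference (target and file names made up):

    # Assumed demo, not code from this commit.
    set(_deps "a.dll;b.dll")
    add_custom_command(TARGET demo_target POST_BUILD
      # Without COMMAND_EXPAND_LISTS: one argument, "a.dll;b.dll" (copy fails).
      # With it: the list expands to two arguments, a.dll b.dll, then the destination.
      COMMAND ${CMAKE_COMMAND} -E copy_if_different "${_deps}" "${_out_dir}"
      COMMAND_EXPAND_LISTS
      VERBATIM)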
12 changes: 12 additions & 0 deletions cmake/onnxruntime_unittests.cmake
@@ -525,6 +525,9 @@ set (onnxruntime_webgpu_external_dawn_test_SRC
set (onnxruntime_webgpu_external_dawn_test_SRC
${TEST_SRC_DIR}/webgpu/external_dawn/main.cc)

set (onnxruntime_webgpu_delay_load_test_SRC
${TEST_SRC_DIR}/webgpu/delay_load/main.cc)

# tests from lowest level library up.
# the order of libraries should be maintained, with higher libraries being added first in the list

@@ -1864,4 +1867,13 @@ if (onnxruntime_USE_WEBGPU AND onnxruntime_USE_EXTERNAL_DAWN)
onnxruntime_add_include_to_target(onnxruntime_webgpu_external_dawn_test dawn::dawncpp_headers dawn::dawn_headers)
endif()

if (onnxruntime_USE_WEBGPU AND WIN32 AND onnxruntime_BUILD_SHARED_LIB AND NOT CMAKE_SYSTEM_NAME STREQUAL "Emscripten" AND NOT onnxruntime_MINIMAL_BUILD)
AddTest(DYN
TARGET onnxruntime_webgpu_delay_load_test
SOURCES ${onnxruntime_webgpu_delay_load_test_SRC}
LIBS ${SYS_PATH_LIB}
DEPENDS ${all_dependencies}
)
endif()
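The guard limits the new test to Windows shared-library builds, where the MSVC delay-load machinery applies. As an assumed outline only (the real checks live in ${TEST_SRC_DIR}/webgpu/delay_load/main.cc):

    // Hypothetical outline, not the actual test source.
    #include <Windows.h>
    #include <cassert>

    int main() {
      // Load onnxruntime.dll from a directory outside the default search path;
      // creating a WebGPU session afterwards triggers the delayed load of its
      // DLL dependencies, which should resolve next to onnxruntime.dll.
      HMODULE ort = LoadLibraryW(L"test_dir\\onnxruntime.dll");
      assert(ort != nullptr);
      // ... create an ORT session using the WebGPU EP and expect success ...
      FreeLibrary(ort);
      return 0;
    }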

include(onnxruntime_fuzz_test.cmake)
2 changes: 1 addition & 1 deletion docs/ContribOperators.md
@@ -1625,7 +1625,7 @@ This version of the operator has been available since version 1 of the 'com.microsoft' operator set.
#### Type Constraints

<dl>
<dt><tt>T</tt> : tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(float16), tensor(float), tensor(double)</dt>
<dt><tt>T</tt> : tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16)</dt>
<dd>Constrain input and output types.</dd>
</dl>

1 change: 0 additions & 1 deletion java/src/test/java/ai/onnxruntime/InferenceTest.java
@@ -737,7 +737,6 @@ public void testCoreML() throws OrtException {
runProvider(OrtProvider.CORE_ML);
}

@Disabled("DirectML Java API hasn't been supported yet")
@Test
@EnabledIfSystemProperty(named = "USE_DML", matches = "1")
public void testDirectML() throws OrtException {
ProviderOptionsTest.java
@@ -27,15 +27,13 @@
import java.util.HashMap;
import java.util.Map;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.DisabledIfSystemProperty;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;

public class ProviderOptionsTest {
private static final OrtEnvironment env = TestHelpers.getOrtEnvironment();

@Test
@EnabledIfSystemProperty(named = "USE_CUDA", matches = "1")
@DisabledIfSystemProperty(named = "NO_CUDA_TEST", matches = "1")
public void testCUDAOptions() throws OrtException {
// Test standard options
OrtCUDAProviderOptions cudaOpts = new OrtCUDAProviderOptions(0);
@@ -63,7 +61,6 @@ public void testCUDAOptions() throws OrtException {

@Test
@EnabledIfSystemProperty(named = "USE_TENSORRT", matches = "1")
@DisabledIfSystemProperty(named = "NO_CUDA_TEST", matches = "1")
public void testTensorRT() throws OrtException {
// Test standard options
OrtTensorRTProviderOptions rtOpts = new OrtTensorRTProviderOptions(0);
10 changes: 6 additions & 4 deletions js/node/CMakeLists.txt
@@ -113,10 +113,12 @@ endif()
if (WIN32)
file(COPY ${ONNXRUNTIME_WIN_BIN_DIR}/onnxruntime.dll
DESTINATION ${dist_folder})
if (USE_DML)
file(COPY ${ONNXRUNTIME_WIN_BIN_DIR}/DirectML.dll
DESTINATION ${dist_folder})
endif ()
if (ORT_NODEJS_DLL_DEPS)
foreach(dll ${ORT_NODEJS_DLL_DEPS})
file(COPY ${dll} DESTINATION ${dist_folder})
endforeach()
endif()

elseif (APPLE)
file(COPY ${ONNXRUNTIME_BUILD_DIR}/libonnxruntime.dylib
DESTINATION ${dist_folder} FOLLOW_SYMLINK_CHAIN)
5 changes: 5 additions & 0 deletions js/node/script/build.ts
@@ -39,6 +39,8 @@ const USE_TENSORRT = !!buildArgs.use_tensorrt;
const USE_COREML = !!buildArgs.use_coreml;
// --use_qnn
const USE_QNN = !!buildArgs.use_qnn;
// --dll_deps=
const DLL_DEPS = buildArgs.dll_deps;

// build path
const ROOT_FOLDER = path.join(__dirname, '..');
@@ -82,6 +84,9 @@ if (USE_COREML) {
if (USE_QNN) {
args.push('--CDUSE_QNN=ON');
}
if (DLL_DEPS) {
args.push(`--CDORT_NODEJS_DLL_DEPS=${DLL_DEPS}`);
}

// set CMAKE_OSX_ARCHITECTURES for macOS build
if (os.platform() === 'darwin') {
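End to end, an invocation could look like this (paths hypothetical):

    npm run build -- --config=Release "--dll_deps=C:/build/Release/DirectML.dll;C:/build/Release/dxil.dll"

build.ts forwards the value unchanged as --CDORT_NODEJS_DLL_DEPS=<list> on the cmake-js command line, which js/node/CMakeLists.txt then reads as ORT_NODEJS_DLL_DEPS when populating the dist folder.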
37 changes: 0 additions & 37 deletions js/node/src/directml_load_helper.cc

This file was deleted.

6 changes: 0 additions & 6 deletions js/node/src/directml_load_helper.h

This file was deleted.

4 changes: 0 additions & 4 deletions js/node/src/inference_session_wrap.cc
@@ -4,7 +4,6 @@
#include "onnxruntime_cxx_api.h"

#include "common.h"
#include "directml_load_helper.h"
#include "inference_session_wrap.h"
#include "run_options_helper.h"
#include "session_options_helper.h"
@@ -19,9 +18,6 @@ Napi::FunctionReference& InferenceSessionWrap::GetTensorConstructor() {
}

Napi::Object InferenceSessionWrap::Init(Napi::Env env, Napi::Object exports) {
#if defined(USE_DML) && defined(_WIN32)
LoadDirectMLDll(env);
#endif
// create ONNX runtime env
Ort::InitApi();
ORT_NAPI_THROW_ERROR_IF(
55 changes: 34 additions & 21 deletions js/node/src/tensor_helper.cc
@@ -53,24 +53,24 @@ constexpr size_t DATA_TYPE_ELEMENT_SIZE_MAP[] = {
static_assert(sizeof(DATA_TYPE_ELEMENT_SIZE_MAP) == sizeof(size_t) * ONNX_TENSOR_ELEMENT_DATA_TYPE_COUNT,
"definition not matching");

constexpr napi_typedarray_type DATA_TYPE_TYPEDARRAY_MAP[] = {
(napi_typedarray_type)(-1), // ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED not supported
napi_float32_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT
napi_uint8_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8
napi_int8_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8
napi_uint16_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16
napi_int16_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16
napi_int32_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32
napi_bigint64_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64
(napi_typedarray_type)(-1), // ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING not supported
napi_uint8_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL
napi_uint16_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16 FLOAT16 uses Uint16Array
napi_float64_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE
napi_uint32_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32
napi_biguint64_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64
(napi_typedarray_type)(-1), // ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64 not supported
(napi_typedarray_type)(-1), // ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128 not supported
(napi_typedarray_type)(-1) // ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16 not supported
constexpr std::underlying_type_t<napi_typedarray_type> DATA_TYPE_TYPEDARRAY_MAP[] = {
std::underlying_type_t<napi_typedarray_type>(-1), // ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED not supported
napi_float32_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT
napi_uint8_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8
napi_int8_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8
napi_uint16_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16
napi_int16_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16
napi_int32_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32
napi_bigint64_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64
std::underlying_type_t<napi_typedarray_type>(-1), // ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING not supported
napi_uint8_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL
napi_uint16_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16 FLOAT16 uses Uint16Array
napi_float64_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE
napi_uint32_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32
napi_biguint64_array, // ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64
std::underlying_type_t<napi_typedarray_type>(-1), // ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64 not supported
std::underlying_type_t<napi_typedarray_type>(-1), // ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128 not supported
std::underlying_type_t<napi_typedarray_type>(-1) // ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16 not supported
};
static_assert(sizeof(DATA_TYPE_TYPEDARRAY_MAP) == sizeof(napi_typedarray_type) * ONNX_TENSOR_ELEMENT_DATA_TYPE_COUNT,
"definition not matching");
@@ -98,7 +98,20 @@ static_assert(sizeof(DATA_TYPE_ID_TO_NAME_MAP) == sizeof(const char*) * ONNX_TENSOR_ELEMENT_DATA_TYPE_COUNT,
"definition not matching");

const std::unordered_map<std::string, ONNXTensorElementDataType> DATA_TYPE_NAME_TO_ID_MAP = {
{"float32", ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT}, {"uint8", ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8}, {"int8", ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8}, {"uint16", ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16}, {"int16", ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16}, {"int32", ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32}, {"int64", ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64}, {"string", ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING}, {"bool", ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL}, {"float16", ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16}, {"float64", ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE}, {"uint32", ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32}, {"uint64", ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64}};
{"float32", ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT},
{"uint8", ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8},
{"int8", ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8},
{"uint16", ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16},
{"int16", ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16},
{"int32", ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32},
{"int64", ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64},
{"string", ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING},
{"bool", ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL},
{"float16", ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16},
{"float64", ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE},
{"uint32", ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32},
{"uint64", ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64},
};

// currently only support tensor
Ort::Value NapiValueToOrtValue(Napi::Env env, Napi::Value value, OrtMemoryInfo* cpu_memory_info, OrtMemoryInfo* webgpu_memory_info) {
@@ -181,7 +194,7 @@ Ort::Value NapiValueToOrtValue(Napi::Env env, Napi::Value value, OrtMemoryInfo* cpu_memory_info, OrtMemoryInfo* webgpu_memory_info) {
"Tensor.data must be a typed array for numeric tensor.");

auto tensorDataTypedArray = tensorDataValue.As<Napi::TypedArray>();
auto typedArrayType = tensorDataValue.As<Napi::TypedArray>().TypedArrayType();
std::underlying_type_t<napi_typedarray_type> typedArrayType = tensorDataValue.As<Napi::TypedArray>().TypedArrayType();
ORT_NAPI_THROW_TYPEERROR_IF(DATA_TYPE_TYPEDARRAY_MAP[elemType] != typedArrayType, env,
"Tensor.data must be a typed array (", DATA_TYPE_TYPEDARRAY_MAP[elemType], ") for ",
tensorTypeString, " tensors, but got typed array (", typedArrayType, ").");
Expand Down Expand Up @@ -294,7 +307,7 @@ Napi::Value OrtValueToNapiValue(Napi::Env env, Ort::Value&& value) {
}
napi_value typedArrayData;
napi_status status =
napi_create_typedarray(env, DATA_TYPE_TYPEDARRAY_MAP[elemType], size, arrayBuffer, 0, &typedArrayData);
napi_create_typedarray(env, (napi_typedarray_type)DATA_TYPE_TYPEDARRAY_MAP[elemType], size, arrayBuffer, 0, &typedArrayData);
NAPI_THROW_IF_FAILED(env, status, Napi::Value);

// new Tensor(type, typedArrayData, dims)