From a5769f0421410c22e1acbb790254b924bd4531cf Mon Sep 17 00:00:00 2001 From: Xu Xing Date: Thu, 12 Oct 2023 21:32:24 +0800 Subject: [PATCH] Support int64 --- cmake/onnxruntime_webassembly.cmake | 1 + js/web/package.json | 2 +- .../debug_node_inputs_outputs_utils.cc | 54 +++++++++++++++---- .../core/framework/sequential_executor.cc | 3 +- 4 files changed, 48 insertions(+), 12 deletions(-) diff --git a/cmake/onnxruntime_webassembly.cmake b/cmake/onnxruntime_webassembly.cmake index c6510c97a617e..dc8c803bd315c 100644 --- a/cmake/onnxruntime_webassembly.cmake +++ b/cmake/onnxruntime_webassembly.cmake @@ -265,6 +265,7 @@ else() if (onnxruntime_USE_WEBNN) set_property(TARGET onnxruntime_webassembly APPEND_STRING PROPERTY LINK_FLAGS " --bind -sWASM_BIGINT") endif() + set_property(TARGET onnxruntime_webassembly APPEND_STRING PROPERTY LINK_FLAGS " --bind -sWASM_BIGINT") # Set link flag to enable exceptions support, this will override default disabling exception throwing behavior when disable exceptions. target_link_options(onnxruntime_webassembly PRIVATE "SHELL:-s DISABLE_EXCEPTION_THROWING=0") diff --git a/js/web/package.json b/js/web/package.json index d0bad8ffba128..a920ba38aa071 100644 --- a/js/web/package.json +++ b/js/web/package.json @@ -45,7 +45,7 @@ "@webgpu/types": "^0.1.30", "base64-js": "^1.5.1", "chai": "^4.3.7", - "electron": "^23.1.2", + "electron": "^23.3.13", "globby": "^13.1.3", "karma": "^6.4.1", "karma-browserstack-launcher": "^1.6.0", diff --git a/onnxruntime/core/framework/debug_node_inputs_outputs_utils.cc b/onnxruntime/core/framework/debug_node_inputs_outputs_utils.cc index 8e12d860e7785..30a7c7d876672 100644 --- a/onnxruntime/core/framework/debug_node_inputs_outputs_utils.cc +++ b/onnxruntime/core/framework/debug_node_inputs_outputs_utils.cc @@ -2,9 +2,8 @@ // Licensed under the MIT License. 
#ifdef DEBUG_NODE_INPUTS_OUTPUTS -#include <iostream> -#include <fstream> -#include <sstream> +//#include <fstream> +//#include <sstream> #include <emscripten.h> #include "core/framework/debug_node_inputs_outputs_utils.h" #include "core/framework/print_tensor_utils.h" @@ -79,10 +78,36 @@ void DumpTensorToStdOut(const Tensor& tensor, const std::string tensor_name, con auto data = tensor.Data<float>(); const auto& shape = tensor.Shape(); auto num_items = shape.Size(); + auto numDimensions = shape.NumDimensions(); // const char* name = "tempname"; + int64_t shape2[numDimensions]; + for (size_t i =0 ; i < numDimensions; i ++) { + shape2[i] = shape[i]; + } + EM_ASM( { - function SaveObjectsToFile(jsonObjects, prefix = 'Uint8Array') { + + DataView.prototype.getUint64 = function(byteOffset, littleEndian) { // split 64-bit number into two 32-bit parts const left = this.getUint32(byteOffset, littleEndian); const right = this.getUint32(byteOffset+4, littleEndian); // combine the two 32-bit values const combined = littleEndian? left + 2**32*right : 2**32*left + right; // console.log("aa" + combined); if (!Number.isSafeInteger(combined)) console.warn(combined, 'exceeds MAX_SAFE_INTEGER. 
Precision may be lost'); return combined; }; + BigInt.prototype.toJSON = function () { return this.toString(16); }; function SaveObjectsToFile(jsonObjects) { + const prefix= jsonObjects[name]; const object = jsonObjects; const fileName = `${prefix}.json`; const a = document.createElement('a'); @@ -95,17 +120,28 @@ void DumpTensorToStdOut(const Tensor& tensor, const std::string tensor_name, con a.click(); } const buffer = $0; - const buffer_size = $1 * 4; + + const buffer_size = $1; + console.log(buffer_size); //const name = UTF8ToString(name, 9); - const name = UTF8ToString($2); const bytes = new Uint8Array(buffer_size); bytes.set(HEAPU8.subarray(buffer, buffer + buffer_size)); //SaveObjectsToFile("This is from EM_ASM"); - SaveObjectsToFile(new Float32Array(bytes.buffer), name); + // shape + + const shape_ptr = $2; + const shape_size = $3*8; + console.log(shape_size); + const shape_bytes = new Uint8Array(shape_size); + shape_bytes.set(HEAPU8.subarray(shape_ptr, shape_ptr + shape_size)); + const name = UTF8ToString($4); + const shape_int64 = new BigInt64Array(shape_bytes.buffer); + SaveObjectsToFile({'name': name, 'data': new Float32Array(bytes.buffer), 'shape':shape_int64, 'size': buffer_size}); // console.log($0); //return $0; }, - reinterpret_cast<intptr_t>(data), static_cast<int32_t>(num_items), reinterpret_cast<intptr_t>(tensor_name.c_str())); + reinterpret_cast<intptr_t>(data), static_cast<int32_t>(num_items*4), shape2, numDimensions, reinterpret_cast<intptr_t>(tensor_name.c_str())); + onnxruntime::utils::PrintCpuTensor<float>(tensor, dump_options.snippet_threshold, dump_options.snippet_edge_items); if (dump_options.dump_flags & NodeDumpOptions::DumpFlags::StatisticsData) { onnxruntime::utils::PrintCpuTensorStats<float>(tensor); @@ -543,9 +579,7 @@ void DumpNodeInputs( tensor_metadata.name = input_defs[i]->Name(); tensor_metadata.step = dump_context.iteration; tensor_metadata.consumer = node.Name() + ":" + std::to_string(i); - std::cout<< __FUNCTION__<<":"<<__LINE__<<"\n"; DumpTensor(dump_options, *tensor, 
tensor_metadata, session_state); - std::cout<< __FUNCTION__<<":"<<__LINE__<<"\n"; //} } else { std::cout << " is empty optional tensor.\n"; diff --git a/onnxruntime/core/framework/sequential_executor.cc b/onnxruntime/core/framework/sequential_executor.cc index ba68bc1d7d834..bc9edc5696b6c 100644 --- a/onnxruntime/core/framework/sequential_executor.cc +++ b/onnxruntime/core/framework/sequential_executor.cc @@ -327,7 +327,7 @@ class KernelScope { #endif #ifdef DEBUG_NODE_INPUTS_OUTPUTS - utils::DumpNodeInputs(dump_context_, kernel_context_, kernel_.Node(), session_state_); + // utils::DumpNodeInputs(dump_context_, kernel_context_, kernel_.Node(), session_state_); #endif #ifdef ENABLE_NVTX_PROFILE @@ -401,6 +401,7 @@ class KernelScope { #endif #ifdef DEBUG_NODE_INPUTS_OUTPUTS + utils::DumpNodeInputs(dump_context_, kernel_context_, kernel_.Node(), session_state_); utils::DumpNodeOutputs(dump_context_, kernel_context_, kernel_.Node(), session_state_); #endif } //~KernelScope