
Commit

Support int64
axinging committed Oct 12, 2023
1 parent 7886f39 · commit a5769f0
Showing 4 changed files with 48 additions and 12 deletions.
cmake/onnxruntime_webassembly.cmake: 1 change (1 addition, 0 deletions)
@@ -265,6 +265,7 @@ else()
   if (onnxruntime_USE_WEBNN)
     set_property(TARGET onnxruntime_webassembly APPEND_STRING PROPERTY LINK_FLAGS " --bind -sWASM_BIGINT")
   endif()
+  set_property(TARGET onnxruntime_webassembly APPEND_STRING PROPERTY LINK_FLAGS " --bind -sWASM_BIGINT")
 
   # Set link flag to enable exception support; this overrides the default disabling of exception throwing when exceptions are turned off.
   target_link_options(onnxruntime_webassembly PRIVATE "SHELL:-s DISABLE_EXCEPTION_THROWING=0")
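For context, the -sWASM_BIGINT linker flag enables Emscripten's BigInt integration, so 64-bit integers cross the JS/Wasm boundary as JavaScript BigInt values instead of being split into 32-bit pairs; this is what lets the int64 shape data in the dump below survive the round trip. A minimal sketch of the observable effect, assuming a module built with this flag and a hypothetical C export returning int64_t:

// Sketch only. "Module" is the Emscripten module object and _get_num_items
// is a hypothetical export of "int64_t get_num_items(void)".
const count = Module._get_num_items(); // arrives as a BigInt, e.g. 42n
console.log(typeof count);             // "bigint"
console.log(Number(count));            // exact only below 2 ** 53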
js/web/package.json: 2 changes (1 addition, 1 deletion)
@@ -45,7 +45,7 @@
     "@webgpu/types": "^0.1.30",
     "base64-js": "^1.5.1",
     "chai": "^4.3.7",
-    "electron": "^23.1.2",
+    "electron": "^23.3.13",
     "globby": "^13.1.3",
     "karma": "^6.4.1",
     "karma-browserstack-launcher": "^1.6.0",
onnxruntime/core/framework/debug_node_inputs_outputs_utils.cc: 54 changes (44 additions, 10 deletions)
@@ -2,9 +2,8 @@
 // Licensed under the MIT License.
 
 #ifdef DEBUG_NODE_INPUTS_OUTPUTS
-#include <fstream>
-#include <iostream>
-#include <string>
+//#include <iostream>
+//#include <string>
 #include <emscripten/emscripten.h>
 #include "core/framework/debug_node_inputs_outputs_utils.h"
 #include "core/framework/print_tensor_utils.h"
@@ -79,10 +78,36 @@ void DumpTensorToStdOut(const Tensor& tensor, const std::string tensor_name, con
   auto data = tensor.Data<T>();
   const auto& shape = tensor.Shape();
   auto num_items = shape.Size();
+  auto numDimensions = shape.NumDimensions();
+  // const char* name = "tempname";
+  int64_t shape2[numDimensions];
+  for (size_t i = 0; i < numDimensions; i++) {
+    shape2[i] = shape[i];
+  }
 
   EM_ASM(
       {
-        function SaveObjectsToFile(jsonObjects, prefix = 'Uint8Array') {
+        DataView.prototype.getUint64 = function(byteOffset, littleEndian) {
+          // split the 64-bit number into two 32-bit parts
+          const left = this.getUint32(byteOffset, littleEndian);
+          const right = this.getUint32(byteOffset + 4, littleEndian);
+
+          // combine the two 32-bit values
+          const combined = littleEndian ? left + 2 ** 32 * right : 2 ** 32 * left + right;
+          // console.log("aa" + combined);
+
+          if (!Number.isSafeInteger(combined))
+            console.warn(combined, 'exceeds MAX_SAFE_INTEGER. Precision may be lost');
+
+          return combined;
+        };
+
+        BigInt.prototype.toJSON = function() {
+          return this.toString(16);
+        };
+        function SaveObjectsToFile(jsonObjects) {
+          const prefix = jsonObjects['name'];
+          const object = jsonObjects;
           const fileName = `${prefix}.json`;
           const a = document.createElement('a');
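The getUint64 helper added here exists because a JavaScript Number represents integers exactly only up to 2 ** 53 - 1, so a 64-bit value is read as two 32-bit halves and recombined, with a warning when precision can be lost. A standalone sketch of the same technique, runnable outside EM_ASM:

// Read a little-endian uint64 as two 32-bit halves and recombine them.
function readUint64LE(view, byteOffset) {
  const lo = view.getUint32(byteOffset, true);
  const hi = view.getUint32(byteOffset + 4, true);
  const combined = lo + 2 ** 32 * hi;
  if (!Number.isSafeInteger(combined)) {
    console.warn(combined, 'exceeds MAX_SAFE_INTEGER; precision may be lost');
  }
  return combined;
}

// Usage: write a known value with the built-in BigInt API, read it back.
const view = new DataView(new ArrayBuffer(8));
view.setBigUint64(0, 123456789n, true);
console.log(readUint64LE(view, 0)); // 123456789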
@@ -95,17 +120,28 @@ void DumpTensorToStdOut(const Tensor& tensor, const std::string tensor_name, con
           a.click();
         }
         const buffer = $0;
-        const buffer_size = $1 * 4;
+        const buffer_size = $1;
+        console.log(buffer_size);
+        //const name = UTF8ToString(name, 9);
-        const name = UTF8ToString($2);
         const bytes = new Uint8Array(buffer_size);
         bytes.set(HEAPU8.subarray(buffer, buffer + buffer_size));
         //SaveObjectsToFile("This is from EM_ASM");
-        SaveObjectsToFile(new Float32Array(bytes.buffer), name);
+        // shape
+        const shape_ptr = $2;
+        const shape_size = $3 * 8;
+        console.log(shape_size);
+        const shape_bytes = new Uint8Array(shape_size);
+        shape_bytes.set(HEAPU8.subarray(shape_ptr, shape_ptr + shape_size));
+        const name = UTF8ToString($4);
+        const shape_int64 = new BigInt64Array(shape_bytes.buffer);
+        SaveObjectsToFile({'name': name, 'data': new Float32Array(bytes.buffer), 'shape': shape_int64, 'size': buffer_size});
         // console.log($0);
         //return $0;
       },
-      reinterpret_cast<int32_t>(data), static_cast<int32_t>(num_items), reinterpret_cast<int32_t>(tensor_name.c_str()));
+      reinterpret_cast<int32_t>(data), static_cast<int32_t>(num_items * 4), shape2, numDimensions, reinterpret_cast<int32_t>(tensor_name.c_str()));
 
   onnxruntime::utils::PrintCpuTensor<T>(tensor, dump_options.snippet_threshold, dump_options.snippet_edge_items);
   if (dump_options.dump_flags & NodeDumpOptions::DumpFlags::StatisticsData) {
     onnxruntime::utils::PrintCpuTensorStats<T>(tensor);
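Two details in this hunk are easy to miss: the shape is copied out of the Emscripten heap as raw bytes and reinterpreted as a BigInt64Array (hence shape_size = $3 * 8, eight bytes per int64 dimension), and JSON.stringify normally throws on BigInt values, which is why BigInt.prototype.toJSON was patched above to emit hex strings. A small sketch of how the dumped shape serializes under that patch:

// With the toJSON patch, each BigInt dimension serializes as base-16 text;
// the typed array itself serializes as an object keyed by index.
BigInt.prototype.toJSON = function() { return this.toString(16); };

const shape_int64 = new BigInt64Array([1n, 3n, 224n, 224n]);
console.log(JSON.stringify({ name: 'input_0', shape: shape_int64 }));
// {"name":"input_0","shape":{"0":"1","1":"3","2":"e0","3":"e0"}}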
@@ -543,9 +579,7 @@ void DumpNodeInputs(
       tensor_metadata.name = input_defs[i]->Name();
       tensor_metadata.step = dump_context.iteration;
       tensor_metadata.consumer = node.Name() + ":" + std::to_string(i);
-      std::cout << __FUNCTION__ << ":" << __LINE__ << "\n";
       DumpTensor(dump_options, *tensor, tensor_metadata, session_state);
-      std::cout << __FUNCTION__ << ":" << __LINE__ << "\n";
       //}
     } else {
       std::cout << " is empty optional tensor.\n";
onnxruntime/core/framework/sequential_executor.cc: 3 changes (2 additions, 1 deletion)
@@ -327,7 +327,7 @@ class KernelScope {
 #endif
 
 #ifdef DEBUG_NODE_INPUTS_OUTPUTS
-    utils::DumpNodeInputs(dump_context_, kernel_context_, kernel_.Node(), session_state_);
+    // utils::DumpNodeInputs(dump_context_, kernel_context_, kernel_.Node(), session_state_);
 #endif
@@ -401,6 +401,7 @@ class KernelScope {
 #endif
 
 #ifdef DEBUG_NODE_INPUTS_OUTPUTS
+    utils::DumpNodeInputs(dump_context_, kernel_context_, kernel_.Node(), session_state_);
     utils::DumpNodeOutputs(dump_context_, kernel_context_, kernel_.Node(), session_state_);
 #endif
   }  //~KernelScope
