From 7886f394973a1a5f5dba9b2405fba08c43accb3b Mon Sep 17 00:00:00 2001 From: Xu Xing Date: Thu, 12 Oct 2023 13:34:26 +0800 Subject: [PATCH] [webgpu] dump test --- .../debug_node_inputs_outputs_utils.cc | 66 +++++++++++++++++-- .../framework/print_tensor_statistics_utils.h | 2 +- 2 files changed, 60 insertions(+), 8 deletions(-) diff --git a/onnxruntime/core/framework/debug_node_inputs_outputs_utils.cc b/onnxruntime/core/framework/debug_node_inputs_outputs_utils.cc index ec50bb7d6a5cb..8e12d860e7785 100644 --- a/onnxruntime/core/framework/debug_node_inputs_outputs_utils.cc +++ b/onnxruntime/core/framework/debug_node_inputs_outputs_utils.cc @@ -2,7 +2,10 @@ // Licensed under the MIT License. #ifdef DEBUG_NODE_INPUTS_OUTPUTS - +#include <emscripten.h> +#include <emscripten/console.h> +#include <iostream> +#include <string> #include "core/framework/debug_node_inputs_outputs_utils.h" #include "core/framework/print_tensor_utils.h" #include "core/framework/print_tensor_statistics_utils.h" @@ -59,7 +62,50 @@ bool FilterNode(const NodeDumpOptions& dump_options, const Node& node) { } template <typename T> -void DumpTensorToStdOut(const Tensor& tensor, const NodeDumpOptions& dump_options) { +void DumpTensorToStdOut(const Tensor& tensor, const std::string tensor_name, const NodeDumpOptions& dump_options) { //void send(void const * data, unsigned length) { /* auto data = tensor.Data<T>(); const auto& shape = tensor.Shape(); auto num_items = shape.Size(); EM_ASM({ Module.send(HEAPU8.subarray($0, $0 + $1)); }, data, num_items); */ //} auto data = tensor.Data<T>(); const auto& shape = tensor.Shape(); auto num_items = shape.Size(); // const char* name = "tempname"; EM_ASM( { function SaveObjectsToFile(jsonObjects, prefix = 'Uint8Array') { const object = jsonObjects; const fileName = `${prefix}.json`; const a = document.createElement('a'); const file = new Blob([JSON.stringify(object)], { type: 'application/json' }); a.href = URL.createObjectURL(file); a.download = fileName; a.click(); } const 
buffer = $0; const buffer_size = $1 * 4; //const name = UTF8ToString(name, 9); const name = UTF8ToString($2); const bytes = new Uint8Array(buffer_size); bytes.set(HEAPU8.subarray(buffer, buffer + buffer_size)); //SaveObjectsToFile("This is from EM_ASM"); SaveObjectsToFile(new Float32Array(bytes.buffer), name); // console.log($0); //return $0; }, reinterpret_cast<intptr_t>(data), static_cast<int>(num_items), reinterpret_cast<intptr_t>(tensor_name.c_str())); onnxruntime::utils::PrintCpuTensor<T>(tensor, dump_options.snippet_threshold, dump_options.snippet_edge_items); if (dump_options.dump_flags & NodeDumpOptions::DumpFlags::StatisticsData) { onnxruntime::utils::PrintCpuTensorStats<T>(tensor); @@ -298,11 +344,12 @@ void DumpCpuTensor( const Tensor& tensor, const TensorMetadata& tensor_metadata) { switch (dump_options.data_destination) { case NodeDumpOptions::DataDestination::StdOut: { - DispatchOnTensorType(tensor.DataType(), DumpTensorToStdOut, tensor, dump_options); + DispatchOnTensorType(tensor.DataType(), DumpTensorToStdOut, tensor, tensor_metadata.name, dump_options); break; } case NodeDumpOptions::DataDestination::TensorProtoFiles: { const Path tensor_file = dump_options.output_dir / Path::Parse(MakeTensorFileName(tensor_metadata.name, dump_options)); + std::cout<<" tensor_file =" <Name(); tensor_metadata.step = dump_context.iteration; tensor_metadata.consumer = node.Name() + ":" + std::to_string(i); + std::cout<< __FUNCTION__<<":"<<__LINE__<<"\n"; DumpTensor(dump_options, *tensor, tensor_metadata, session_state); - } + std::cout<< __FUNCTION__<<":"<<__LINE__<<"\n"; + //} } else { std::cout << " is empty optional tensor.\n"; } @@ -562,12 +612,14 @@ void DumpNodeOutputs( const bool is_shape_set = (dump_options.dump_flags & NodeDumpOptions::DumpFlags::Shape) != 0; PrintIf(is_shape_set, MakeString(" Shape: ", shape, "\n")); - if ((dump_options.dump_flags & NodeDumpOptions::DumpFlags::OutputData) != 0) { + //if ((dump_options.dump_flags & 
NodeDumpOptions::DumpFlags::OutputData) != 0) { tensor_metadata.name = output_defs[i]->Name(); tensor_metadata.step = dump_context.iteration; tensor_metadata.producer = node.Name() + ":" + std::to_string(i); + std::cout<< __FUNCTION__<<":"<<__LINE__<<"\n"; DumpTensor(dump_options, *tensor, tensor_metadata, session_state); - } + std::cout<< __FUNCTION__<<":"<<__LINE__<<"\n"; + //} } else { std::cout << " is empty optional tensor.\n"; } diff --git a/onnxruntime/core/framework/print_tensor_statistics_utils.h b/onnxruntime/core/framework/print_tensor_statistics_utils.h index fd036114f3e76..40341c5547dd2 100644 --- a/onnxruntime/core/framework/print_tensor_statistics_utils.h +++ b/onnxruntime/core/framework/print_tensor_statistics_utils.h @@ -139,7 +139,7 @@ void PrintCpuTensorStats(const Tensor& tensor) { } const T* data = tensor.Data<T>(); - PrintTensorStats(data, num_items); + PrintTensorStats(data, (size_t)num_items); std::cout << std::endl; }