
Commit

[webgpu] dump test
axinging committed Oct 12, 2023
1 parent 53be802 commit 7886f39
Showing 2 changed files with 60 additions and 8 deletions.
66 changes: 59 additions & 7 deletions onnxruntime/core/framework/debug_node_inputs_outputs_utils.cc
@@ -2,7 +2,10 @@
 // Licensed under the MIT License.
 
 #ifdef DEBUG_NODE_INPUTS_OUTPUTS
-
+#include <fstream>
+#include <iostream>
+#include <string>
+#include <emscripten/emscripten.h>
 #include "core/framework/debug_node_inputs_outputs_utils.h"
 #include "core/framework/print_tensor_utils.h"
 #include "core/framework/print_tensor_statistics_utils.h"
@@ -59,7 +62,50 @@ bool FilterNode(const NodeDumpOptions& dump_options, const Node& node) {
 }
 
 template <typename T>
-void DumpTensorToStdOut(const Tensor& tensor, const NodeDumpOptions& dump_options) {
+void DumpTensorToStdOut(const Tensor& tensor, const std::string& tensor_name, const NodeDumpOptions& dump_options) {
+  // Earlier attempt, kept for reference: push the raw bytes to a JS-side
+  // receiver registered as Module.send (a hypothetical receiver is sketched
+  // after this hunk). Note that num_items counts elements, not bytes.
+  /*
+  auto data = tensor.Data<T>();
+  const auto& shape = tensor.Shape();
+  auto num_items = shape.Size();
+  EM_ASM({
+    Module.send(HEAPU8.subarray($0, $0 + $1));
+  }, data, num_items);
+  */
+  auto data = tensor.Data<T>();
+  const auto& shape = tensor.Shape();
+  auto num_items = shape.Size();
+  EM_ASM(
+      {
+        // Serialize an object to JSON and trigger a browser download.
+        // Needs a DOM; this only works on the main browser thread.
+        function SaveObjectsToFile(jsonObjects, prefix = 'Uint8Array') {
+          const fileName = `${prefix}.json`;
+          const a = document.createElement('a');
+          const file = new Blob([JSON.stringify(jsonObjects)], {type: 'application/json'});
+          a.href = URL.createObjectURL(file);
+          a.download = fileName;
+          a.click();
+        }
+        const buffer = $0;
+        // NOTE: assumes 4-byte (float32) elements; other dtypes would also
+        // need the element size passed in.
+        const buffer_size = $1 * 4;
+        const name = UTF8ToString($2);
+        const bytes = new Uint8Array(buffer_size);
+        bytes.set(HEAPU8.subarray(buffer, buffer + buffer_size));
+        // Array.from yields a plain JSON array rather than an object with
+        // numeric keys.
+        SaveObjectsToFile(Array.from(new Float32Array(bytes.buffer)), name);
+      },
+      reinterpret_cast<intptr_t>(data), static_cast<int32_t>(num_items),
+      reinterpret_cast<intptr_t>(tensor_name.c_str()));
   onnxruntime::utils::PrintCpuTensor<T>(tensor, dump_options.snippet_threshold, dump_options.snippet_edge_items);
   if (dump_options.dump_flags & NodeDumpOptions::DumpFlags::StatisticsData) {
     onnxruntime::utils::PrintCpuTensorStats<T>(tensor);
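
The commented-out block above pushes raw bytes to a `Module.send` hook that this commit never defines. A minimal sketch of such a JS-side receiver follows; the handler body is an assumption, not part of this commit:

// Hypothetical receiver for the commented-out Module.send path. It must be
// registered on the Emscripten Module object before the wasm module runs.
Module.send = function(bytes) {
  // `bytes` is a Uint8Array view into the wasm heap; copy it before the
  // heap grows or the underlying buffer is reused.
  const copy = bytes.slice();
  console.log('received ' + copy.byteLength + ' bytes from wasm');
};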
@@ -298,11 +344,12 @@ void DumpCpuTensor(
     const Tensor& tensor, const TensorMetadata& tensor_metadata) {
   switch (dump_options.data_destination) {
     case NodeDumpOptions::DataDestination::StdOut: {
-      DispatchOnTensorType(tensor.DataType(), DumpTensorToStdOut, tensor, dump_options);
+      DispatchOnTensorType(tensor.DataType(), DumpTensorToStdOut, tensor, tensor_metadata.name, dump_options);
       break;
     }
     case NodeDumpOptions::DataDestination::TensorProtoFiles: {
       const Path tensor_file = dump_options.output_dir / Path::Parse(MakeTensorFileName(tensor_metadata.name, dump_options));
+      std::cout << " tensor_file=" << tensor_file.ToPathString() << ", tensor_metadata.name=" << tensor_metadata.name << "\n";
       DumpTensorToFile(tensor, tensor_metadata.name, tensor_file);
       break;
     }
@@ -325,6 +372,7 @@ void DumpTensor(
     const SessionState& session_state) {
   // check tensor is on CPU before dumping it
   auto& tensor_location = tensor.Location();
+  std::cout << __FUNCTION__ << ":" << __LINE__ << "\n";
   if (tensor_location.device.Type() == OrtDevice::CPU ||
       tensor_location.mem_type == OrtMemTypeCPUInput ||
       tensor_location.mem_type == OrtMemTypeCPUOutput) {
@@ -491,12 +539,14 @@ void DumpNodeInputs(
       const bool is_shape_set = (dump_options.dump_flags & NodeDumpOptions::DumpFlags::Shape) != 0;
       PrintIf(is_shape_set, MakeString(" Shape: ", shape, "\n"));
 
-      if ((dump_options.dump_flags & NodeDumpOptions::DumpFlags::InputData) != 0) {
+      //if ((dump_options.dump_flags & NodeDumpOptions::DumpFlags::InputData) != 0) {  // bypassed for this test: always dump inputs
         tensor_metadata.name = input_defs[i]->Name();
         tensor_metadata.step = dump_context.iteration;
         tensor_metadata.consumer = node.Name() + ":" + std::to_string(i);
+        std::cout << __FUNCTION__ << ":" << __LINE__ << "\n";
         DumpTensor(dump_options, *tensor, tensor_metadata, session_state);
-      }
+      std::cout << __FUNCTION__ << ":" << __LINE__ << "\n";
+      //}
     } else {
       std::cout << " is empty optional tensor.\n";
     }
@@ -562,12 +612,14 @@ void DumpNodeOutputs(
       const bool is_shape_set = (dump_options.dump_flags & NodeDumpOptions::DumpFlags::Shape) != 0;
       PrintIf(is_shape_set, MakeString(" Shape: ", shape, "\n"));
 
-      if ((dump_options.dump_flags & NodeDumpOptions::DumpFlags::OutputData) != 0) {
+      //if ((dump_options.dump_flags & NodeDumpOptions::DumpFlags::OutputData) != 0) {  // bypassed for this test: always dump outputs
        tensor_metadata.name = output_defs[i]->Name();
        tensor_metadata.step = dump_context.iteration;
        tensor_metadata.producer = node.Name() + ":" + std::to_string(i);
+        std::cout << __FUNCTION__ << ":" << __LINE__ << "\n";
        DumpTensor(dump_options, *tensor, tensor_metadata, session_state);
-      }
+      std::cout << __FUNCTION__ << ":" << __LINE__ << "\n";
+      //}
     } else {
       std::cout << " is empty optional tensor.\n";
     }
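
Once SaveObjectsToFile has downloaded the per-tensor JSON files, they can be checked offline against reference outputs. A minimal Node.js sketch; the file name, reference values, and tolerance below are assumptions, not part of this commit:

// Hypothetical offline check for a `<tensor name>.json` dump written by
// SaveObjectsToFile above (a plain JSON array of float32 values).
const fs = require('fs');

function compareDump(path, expected, atol = 1e-3) {
  const actual = JSON.parse(fs.readFileSync(path, 'utf8'));
  if (actual.length !== expected.length) return false;
  return actual.every((v, i) => Math.abs(v - expected[i]) <= atol);
}

// Placeholder tensor name and reference data.
console.log(compareDump('Conv_0_output.json', [0.0, 0.5, 1.0]));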
2 changes: 1 addition & 1 deletion onnxruntime/core/framework/print_tensor_statistics_utils.h
@@ -139,7 +139,7 @@ void PrintCpuTensorStats(const Tensor& tensor) {
   }
 
   const T* data = tensor.Data<T>();
-  PrintTensorStats<T>(data, num_items);
+  PrintTensorStats<T>(data, static_cast<size_t>(num_items));
   std::cout << std::endl;
 }

