#22890 Fix profiling on empty Optional (#22891)
### Description
Fix sequential_executor.cc to avoid a segfault when profiling is used on a model with an empty Optional value.
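
As the change below suggests, an empty Optional surfaces as an OrtValue that reports `IsTensor()` but is not allocated, so calling `Get<Tensor>()` on it during the profiler's size accounting crashes. A minimal sketch of the guarded accumulation, assuming the surrounding `op_kernel_context` from the patched function (illustrative, not the exact ORT code):

```cpp
// Sketch: only inspect tensor outputs that are actually allocated.
size_t total_output_bytes = 0;
const int output_count = op_kernel_context->OutputCount();
for (int i = 0; i < output_count; ++i) {
  const OrtValue* p_output = op_kernel_context->GetOutputMLValue(i);
  // An empty Optional output reports IsTensor() yet holds no allocated tensor,
  // so Get<Tensor>() is only safe once IsAllocated() has also been checked.
  if (p_output != nullptr && p_output->IsTensor() && p_output->IsAllocated()) {
    total_output_bytes += p_output->Get<Tensor>().SizeInBytes();
  }
}
```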



### Motivation and Context
Fixes #22890
amancini-N authored Nov 26, 2024
1 parent afbb539 commit 8826e39
Showing 3 changed files with 43 additions and 2 deletions.
4 changes: 2 additions & 2 deletions onnxruntime/core/framework/sequential_executor.cc
@@ -68,7 +68,7 @@ static void CalculateTotalOutputSizes(OpKernelContextInternal* op_kernel_context
   int output_count = op_kernel_context->OutputCount();
   for (auto i = 0; i < output_count; i++) {
     const OrtValue* p_output = op_kernel_context->GetOutputMLValue(i);
-    if (p_output != nullptr && p_output->IsTensor()) {
+    if (p_output != nullptr && p_output->IsTensor() && p_output->IsAllocated()) {
       const auto& tensor = p_output->Get<Tensor>();
       size_t tensor_size = tensor.SizeInBytes();
 #if defined(TRACE_EXECUTION)
@@ -104,7 +104,7 @@ static void CalculateTotalInputSizes(const OpKernelContextInternal* op_kernel_co
   const int input_count = op_kernel_context->InputCount();
   for (auto i = 0; i < input_count; i++) {
     const OrtValue* p_input = op_kernel_context->GetInputMLValue(i);
-    if (p_input != nullptr && p_input->IsTensor()) {
+    if (p_input != nullptr && p_input->IsTensor() && p_input->IsAllocated()) {
       const OpKernelInfo& op_kernel_info = p_op_kernel->Info();
       const Tensor* p_tensor = nullptr;
       bool is_param = op_kernel_info.TryGetConstantInput(i, &p_tensor);
41 changes: 41 additions & 0 deletions onnxruntime/test/framework/inference_session_test.cc
@@ -818,6 +818,47 @@ TEST(InferenceSessionTests, CheckRunProfilerStartTime) {
   ASSERT_TRUE(before_start_time <= profiling_start_time && profiling_start_time <= after_start_time);
 }

+TEST(InferenceSessionTests, CheckRunProfilerWithOptionalValues) {
+  // Test whether the profiler can work on a model with optional values
+  SessionOptions so;
+
+  so.session_logid = "CheckRunProfiler";
+  so.enable_profiling = true;
+  so.profile_file_prefix = ORT_TSTR("onnxprofile_profile_test");
+
+  InferenceSession session_object(so, GetEnvironment());
+  ASSERT_STATUS_OK(session_object.Load(ORT_TSTR("testdata/relu_with_optional.onnx")));
+  ASSERT_STATUS_OK(session_object.Initialize());
+
+  RunOptions run_options;
+  run_options.run_tag = "RunTag";
+
+  // prepare inputs
+  std::vector<int64_t> dims_x = {1};
+  std::vector<int> values_x = {-4};
+  OrtValue ml_value;
+  CreateMLValue<int>(TestCPUExecutionProvider()->CreatePreferredAllocators()[0], dims_x, values_x, &ml_value);
+  NameMLValMap feeds;
+  feeds.insert(std::make_pair("input", ml_value));
+
+  // prepare outputs
+  std::vector<std::string> output_names;
+  output_names.push_back("output");
+  std::vector<OrtValue> fetches;
+
+  // prepare expected inputs and outputs
+  std::vector<int64_t> expected_dims_y = {1};
+  std::vector<int> expected_values_y = {0};
+
+  // Now run
+  common::Status st = session_object.Run(run_options, feeds, output_names, &fetches);
+  if (!st.IsOK()) {
+    std::cout << "Run returned status: " << st.ErrorMessage() << std::endl;
+  }
+  ASSERT_TRUE(st.IsOK());
+  VerifyOutputs<int>(fetches.at(0).Get<Tensor>(), expected_dims_y, expected_values_y);
+}
+
 TEST(InferenceSessionTests, MultipleSessionsNoTimeout) {
   SessionOptions session_options;

Binary file added onnxruntime/test/testdata/relu_with_optional.onnx
