Skip to content

Commit

Permalink
revert
Browse files Browse the repository at this point in the history
  • Loading branch information
yf711 committed Oct 21, 2024
1 parent 3189d66 commit d45facb
Showing 1 changed file with 45 additions and 21 deletions.
66 changes: 45 additions & 21 deletions onnxruntime/test/perftest/ort_test_session.cc
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,6 @@
#include "core/providers/dml/dml_session_options_config_keys.h"
#endif

#ifdef USE_TENSORRT
#include <vector>
#include "core/session/onnxruntime_c_api_ep.h"
#endif

#ifdef _WIN32
#define strdup _strdup
#endif
Expand All @@ -34,21 +29,6 @@ extern const OrtApi* g_ort;
namespace onnxruntime {
namespace perftest {

// Terminates the process if `status` reports an error.
//
// Despite the macro-style name this is a plain inline function; on error it
// aborts rather than throwing. Diagnostics are written to stderr so they are
// not interleaved with (or lost among) the perf numbers printed on stdout.
// The status object is released before aborting so leak tooling stays quiet.
//
// @param status  Result of an ORT C API call; nullptr means success.
inline void THROW_ON_ERROR(OrtStatus* status) {
  if (status != nullptr) {
    std::cerr << "ErrorMessage:" << g_ort->GetErrorMessage(status) << "\n";
    g_ort->ReleaseStatus(status);
    abort();
  }
}

// Registers the out-of-tree TensorRT execution provider plugin library with
// the environment and appends that EP to the given session options.
//
// @param g_ort        ORT C API table (intentionally shadows the file-level
//                     global of the same name).
// @param env          Environment that owns the registered EP library.
// @param so           Session options the EP is appended to.
// @param device_id    CUDA device ordinal, forwarded as the "device_id" option.
// @param ep_lib_path  Path to the EP shared library. Defaults to the previous
//                     hard-coded developer-machine path for backward
//                     compatibility; pass an explicit path on other machines.
//                     TODO(review): surface this via the perf-test command line.
void TestTensorRTEp(const OrtApi* g_ort, OrtEnv* env, OrtSessionOptions* so, int device_id,
                    const char* ep_lib_path = "/home/yifanl/onnxruntime/samples/tensorRTEp/build/libTensorRTEp.so") {
  THROW_ON_ERROR(g_ort->RegisterOrtExecutionProviderLibrary(ep_lib_path, env, "tensorrtEp"));

  // device_id_str owns the storage behind values[0]; all of these locals must
  // stay alive until the append call below returns.
  std::vector<const char*> keys{"device_id", "str_property"};
  std::string device_id_str = std::to_string(device_id);
  std::vector<const char*> values{device_id_str.c_str(), "strvalue"};
  THROW_ON_ERROR(g_ort->SessionOptionsAppendOrtExecutionProvider(so, "tensorrtEp", env,
                                                                 keys.data(), values.data(), keys.size()));
}

std::chrono::duration<double> OnnxRuntimeTestSession::Run() {
// Randomly pick one OrtValueArray from test_inputs_. (NOT ThreadSafe)
const std::uniform_int_distribution<int>::param_type p(0, static_cast<int>(test_inputs_.size() - 1));
Expand Down Expand Up @@ -189,7 +169,50 @@ OnnxRuntimeTestSession::OnnxRuntimeTestSession(Ort::Env& env, std::random_device
#endif
} else if (provider_name_ == onnxruntime::kTensorrtExecutionProvider) {
#ifdef USE_TENSORRT
TestTensorRTEp(g_ort, env, session_options, 0);
const auto& api = Ort::GetApi();
OrtTensorRTProviderOptionsV2* tensorrt_options;
Ort::ThrowOnError(api.CreateTensorRTProviderOptions(&tensorrt_options));
std::unique_ptr<OrtTensorRTProviderOptionsV2, decltype(api.ReleaseTensorRTProviderOptions)> rel_trt_options(
tensorrt_options, api.ReleaseTensorRTProviderOptions);
std::vector<const char*> option_keys, option_values;
// used to keep all option keys and value strings alive
std::list<std::string> buffer;

#ifdef _MSC_VER
std::string ov_string = ToUTF8String(performance_test_config.run_config.ep_runtime_config_string);
#else
std::string ov_string = performance_test_config.run_config.ep_runtime_config_string;
#endif
std::istringstream ss(ov_string);
std::string token;
while (ss >> token) {
if (token == "") {
continue;
}
auto pos = token.find("|");
if (pos == std::string::npos || pos == 0 || pos == token.length()) {
ORT_THROW(
"[ERROR] [TensorRT] Use a '|' to separate the key and value for the run-time option you are trying to use.\n");
}

buffer.emplace_back(token.substr(0, pos));
option_keys.push_back(buffer.back().c_str());
buffer.emplace_back(token.substr(pos + 1));
option_values.push_back(buffer.back().c_str());
}

Ort::Status status(api.UpdateTensorRTProviderOptions(tensorrt_options,
option_keys.data(), option_values.data(), option_keys.size()));
if (!status.IsOK()) {
OrtAllocator* allocator;
char* options;
Ort::ThrowOnError(api.GetAllocatorWithDefaultOptions(&allocator));
Ort::ThrowOnError(api.GetTensorRTProviderOptionsAsString(tensorrt_options, allocator, &options));
ORT_THROW("[ERROR] [TensorRT] Configuring the CUDA options failed with message: ", status.GetErrorMessage(),
"\nSupported options are:\n", options);
}

session_options.AppendExecutionProvider_TensorRT_V2(*tensorrt_options);

OrtCUDAProviderOptions cuda_options;
cuda_options.device_id = tensorrt_options->device_id;
Expand Down Expand Up @@ -923,3 +946,4 @@ bool OnnxRuntimeTestSession::PopulateGeneratedInputTestData(int32_t seed) {

} // namespace perftest
} // namespace onnxruntime
arrow

0 comments on commit d45facb

Please sign in to comment.