
Commit

fix
yf711 committed Dec 15, 2023
1 parent 7c42222 commit b36c62c
Showing 1 changed file with 9 additions and 10 deletions.
19 changes: 9 additions & 10 deletions onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc
@@ -2729,12 +2729,11 @@ common::Status TensorrtExecutionProvider::Compile(const std::vector<FusedNodeAnd
LOGS_DEFAULT(WARNING) << "[TensorRT EP] Builder heuristics are enabled. For TRT > 8.5, trt_build_heuristics_enable is deprecated, please set builder optimization level as 2 to enable builder heuristics.";

[GitHub Actions / cpplint] onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc:2729: Lines should be <= 120 characters long [whitespace/line_length] [2]
}
#elif NV_TENSORRT_MAJOR == 8 && NV_TENSORRT_MINOR > 5 || NV_TENSORRT_MAJOR > 8
// for TRT 8.6 onwards, heuristic-based tactic option is automatically enabled by setting builder optimization level 2

[GitHub Actions / cpplint] onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc:2732: Lines should be <= 120 characters long [whitespace/line_length] [2]
if (build_heuristics_enable_) {
if (builder_optimization_level_ == 2) {
- LOGS_DEFAULT(WARNING) << "[TensorRT EP] Builder heuristics are enabled automatically by builder optimization level 2. trt_build_heuristics_enable is deprecated on TRT 8.6 onwards.";
- }
- else {
+ LOGS_DEFAULT(WARNING) << "[TensorRT EP] Builder heuristics are automatically enabled by builder optimization level 2. trt_build_heuristics_enable is deprecated on TRT 8.6 onwards.";

[GitHub Actions / cpplint] onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc:2735: Lines should be <= 120 characters long [whitespace/line_length] [2]
+ }else {

[GitHub Actions / cpplint] onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc:2736: Missing space before else [whitespace/braces] [5]
LOGS_DEFAULT(WARNING) << "[TensorRT EP] trt_build_heuristics_enable is deprecated on TRT 8.6 onwards. Please set builder optimization level as 2 to enable builder heuristics.";

[GitHub Actions / cpplint] onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc:2737: Lines should be <= 120 characters long [whitespace/line_length] [2]
}
}
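
The deprecation warnings above steer users from trt_build_heuristics_enable toward builder optimization level 2 on TRT 8.6+. For reference, a minimal sketch of requesting that level through the TensorRT EP provider options via the ONNX Runtime C API; trt_builder_optimization_level and trt_build_heuristics_enable are the documented provider-option keys, model.onnx is a placeholder path, and status checks are omitted for brevity:

```cpp
// Sketch: prefer trt_builder_optimization_level=2 over the deprecated
// trt_build_heuristics_enable when running on TensorRT 8.6+.
// "model.onnx" is a placeholder; OrtStatus* returns are ignored for brevity.
#include <onnxruntime_c_api.h>

int main() {
  const OrtApi* api = OrtGetApiBase()->GetApi(ORT_API_VERSION);

  OrtEnv* env = nullptr;
  api->CreateEnv(ORT_LOGGING_LEVEL_WARNING, "trt_ep_demo", &env);

  OrtSessionOptions* session_options = nullptr;
  api->CreateSessionOptions(&session_options);

  OrtTensorRTProviderOptionsV2* trt_options = nullptr;
  api->CreateTensorRTProviderOptions(&trt_options);

  // Builder optimization level 2 enables heuristic-based tactics on TRT 8.6+,
  // which is what trt_build_heuristics_enable used to request.
  const char* keys[] = {"trt_builder_optimization_level"};
  const char* values[] = {"2"};
  api->UpdateTensorRTProviderOptions(trt_options, keys, values, 1);

  api->SessionOptionsAppendExecutionProvider_TensorRT_V2(session_options, trt_options);

  OrtSession* session = nullptr;
  api->CreateSession(env, ORT_TSTR("model.onnx"), session_options, &session);

  // ... bind inputs and run ...

  api->ReleaseSession(session);
  api->ReleaseTensorRTProviderOptions(trt_options);
  api->ReleaseSessionOptions(session_options);
  api->ReleaseEnv(env);
  return 0;
}
```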
@@ -2814,7 +2813,7 @@ common::Status TensorrtExecutionProvider::Compile(const std::vector<FusedNodeAnd
engine_file.seekg(0, std::ios::beg);
std::unique_ptr<char[]> engine_buf{new char[engine_size]};
engine_file.read((char*)engine_buf.get(), engine_size);
- trt_engine = std::unique_ptr<nvinfer1::ICudaEngine>(runtime_->deserializeCudaEngine(engine_buf.get(), engine_size, nullptr));
+ trt_engine = std::unique_ptr<nvinfer1::ICudaEngine>(runtime_->deserializeCudaEngine(engine_buf.get(), engine_size));
LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] DeSerialized " + engine_cache_path;
if (trt_engine == nullptr) {
return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL,
@@ -2833,7 +2832,7 @@ common::Status TensorrtExecutionProvider::Compile(const std::vector<FusedNodeAnd
"TensorRT EP could not call engine decryption function decrypt");
}
// Deserialize engine
- trt_engine = std::unique_ptr<nvinfer1::ICudaEngine>(runtime_->deserializeCudaEngine(engine_buf.get(), engine_size, nullptr));
+ trt_engine = std::unique_ptr<nvinfer1::ICudaEngine>(runtime_->deserializeCudaEngine(engine_buf.get(), engine_size));
LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] Decrypted and DeSerialized " + encrypted_engine_cache_path;
if (trt_engine == nullptr) {
return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL,
@@ -2874,7 +2873,7 @@ common::Status TensorrtExecutionProvider::Compile(const std::vector<FusedNodeAnd
return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL,
"TensorRT EP failed to create engine from network for fused node: " + fused_node.Name());
}
- trt_engine = std::unique_ptr<nvinfer1::ICudaEngine>(runtime_->deserializeCudaEngine(serialized_engine->data(), serialized_engine->size(), nullptr));
+ trt_engine = std::unique_ptr<nvinfer1::ICudaEngine>(runtime_->deserializeCudaEngine(serialized_engine->data(), serialized_engine->size()));
if (trt_engine == nullptr) {
return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL,
"TensorRT EP failed to deserialize engine for fused node: " + fused_node.Name());
@@ -3078,7 +3077,7 @@ common::Status TensorrtExecutionProvider::Compile(const std::vector<FusedNodeAnd
// https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#threading
trt_state->engine->reset();
*(trt_state->engine) = std::unique_ptr<nvinfer1::ICudaEngine>(
- trt_state->runtime->deserializeCudaEngine(engine_buf.get(), engine_size, nullptr));
+ trt_state->runtime->deserializeCudaEngine(engine_buf.get(), engine_size));
if (!(*(trt_state->engine))) {
return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL, "TensorRT EP Failed to Build Engine.");
}
@@ -3103,7 +3102,7 @@ common::Status TensorrtExecutionProvider::Compile(const std::vector<FusedNodeAnd
// Note: Deserializing an engine from a TensorRT runtime is thread safe per TRT doc
// https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#threading
trt_state->engine->reset();
- *(trt_state->engine) = std::unique_ptr<nvinfer1::ICudaEngine>(trt_state->runtime->deserializeCudaEngine(engine_buf.get(), engine_size, nullptr));
+ *(trt_state->engine) = std::unique_ptr<nvinfer1::ICudaEngine>(trt_state->runtime->deserializeCudaEngine(engine_buf.get(), engine_size));
if (!(*(trt_state->engine))) {
return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL,
"TensorRT EP could not deserialize engine from encrypted cache: " + encrypted_engine_cache_path);
@@ -3234,7 +3233,7 @@ common::Status TensorrtExecutionProvider::Compile(const std::vector<FusedNodeAnd
return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL, "TensorRT EP failed to create engine from network.");
}
*(trt_state->engine) = std::unique_ptr<nvinfer1::ICudaEngine>(
- trt_state->runtime->deserializeCudaEngine(serialized_engine->data(), serialized_engine->size(), nullptr));
+ trt_state->runtime->deserializeCudaEngine(serialized_engine->data(), serialized_engine->size()));
if (!(*(trt_state->engine))) {
return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL, "TensorRT EP failed to deserialize engine.");
}
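
All of the deserializeCudaEngine changes in this commit drop the trailing nullptr, i.e. the deprecated nvinfer1::IPluginFactory* parameter, leaving the two-argument overload that takes only the serialized blob and its size. A standalone sketch of the same pattern against the TensorRT API, assuming TensorRT 8.6+ and a placeholder plan file engine.plan, with error handling trimmed:

```cpp
// Sketch: deserialize a serialized TensorRT engine with the two-argument
// IRuntime::deserializeCudaEngine overload used in this commit.
// "engine.plan" is a placeholder path; error handling is trimmed.
#include <NvInfer.h>

#include <fstream>
#include <iostream>
#include <memory>
#include <vector>

class Logger : public nvinfer1::ILogger {
  void log(Severity severity, const char* msg) noexcept override {
    if (severity <= Severity::kWARNING) std::cerr << msg << std::endl;
  }
};

int main() {
  Logger logger;
  std::unique_ptr<nvinfer1::IRuntime> runtime{nvinfer1::createInferRuntime(logger)};

  // Read the serialized engine into memory, mirroring the EP's engine-cache path.
  std::ifstream file("engine.plan", std::ios::binary | std::ios::ate);
  const std::streamsize size = file.tellg();
  file.seekg(0, std::ios::beg);
  std::vector<char> blob(static_cast<size_t>(size));
  file.read(blob.data(), size);

  // Two-argument overload: (const void* blob, std::size_t size); no IPluginFactory*.
  std::unique_ptr<nvinfer1::ICudaEngine> engine{
      runtime->deserializeCudaEngine(blob.data(), blob.size())};
  return engine != nullptr ? 0 : 1;
}
```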
