fixup! Fixing linting/formatting and more issues.
TedThemistokleous committed Oct 20, 2023
1 parent b082a99 commit 487a27a
Showing 1 changed file with 20 additions and 11 deletions.
31 changes: 20 additions & 11 deletions onnxruntime/python/onnxruntime_pybind_state.cc
@@ -744,7 +744,7 @@ std::unique_ptr<IExecutionProvider> CreateExecutionProviderInstance(
           } else if (option.second == "False" || option.second == "false") {
             params.migraphx_fp16_enable = false;
           } else {
-            ORT_THROW("[ERROR] [MIGraphX] The value for the key 'trt_fp16_enable' should be
+            ORT_THROW("[ERROR] [MIGraphX] The value for the key 'trt_fp16_enable' should be \
                       'True' or 'False'. Default value is 'False'.\n");
           }
         } else if (option.first == "migraphx_int8_enable") {
@@ -753,15 +753,15 @@ std::unique_ptr<IExecutionProvider> CreateExecutionProviderInstance(
           } else if (option.second == "False" || option.second == "false") {
             params.migraphx_int8_enable = false;
           } else {
-            ORT_THROW("[ERROR] [MIGraphX] The value for the key 'migx_int8_enable' should be
+            ORT_THROW("[ERROR] [MIGraphX] The value for the key 'migx_int8_enable' should be \
                       'True' or 'False'. Default value is 'False'.\n");
           }
         } else if (option.first == "migraphx_int8_calibration_table_name") {
           if (!option.second.empty()) {
             calibration_table = option.second;
             params.migraphx_int8_calibration_table_name = calibration_table.c_str();
           } else {
-            ORT_THROW("[ERROR] [MIGraphX] The value for the key 'migx_int8_calibration_table_name' should be a
+            ORT_THROW("[ERROR] [MIGraphX] The value for the key 'migx_int8_calibration_table_name' should be a \
                       file name i.e. 'cal_table'.\n");
           }
         } else if (option.first == "migraphx_use_native_calibration_table") {
@@ -770,7 +770,7 @@ std::unique_ptr<IExecutionProvider> CreateExecutionProviderInstance(
           } else if (option.second == "False" || option.second == "false") {
             params.migraphx_use_native_calibration_table = false;
           } else {
-            ORT_THROW("[ERROR] [MIGraphX] The value for the key 'migx_int8_use_native_calibration_table' should be
+            ORT_THROW("[ERROR] [MIGraphX] The value for the key 'migx_int8_use_native_calibration_table' should be \
                       'True' or 'False'. Default value is 'False'.\n");
           }
         } else {
@@ -790,25 +790,34 @@ std::unique_ptr<IExecutionProvider> CreateExecutionProviderInstance(
 #endif
   } else if (type == kCudaExecutionProvider) {
 #ifdef USE_CUDA
-    // If the environment variable 'CUDA_UNAVAILABLE' exists, then we do not load cuda. This is set by _ld_preload for the manylinux case
-    // as in that case, trying to load the library itself will result in a crash due to the way that auditwheel strips dependencies.
+    // If the environment variable 'CUDA_UNAVAILABLE' exists, then we do not load cuda.
+    // This is set by _ld_preload for the manylinux case as in that case,
+    // trying to load the library itself will result in a crash due to the way that auditwheel strips dependencies.
     if (Env::Default().GetEnvironmentVar("ORT_CUDA_UNAVAILABLE").empty()) {
       if (auto* cuda_provider_info = TryGetProviderInfo_CUDA()) {
         const CUDAExecutionProviderInfo info = GetCudaExecutionProviderInfo(cuda_provider_info,
                                                                             provider_options_map);

-        // This variable is never initialized because the APIs by which it should be initialized are deprecated, however they still
-        // exist are are in-use. Neverthless, it is used to return CUDAAllocator, hence we must try to initialize it here if we can
-        // since FromProviderOptions might contain external CUDA allocator.
+        // This variable is never initialized because the APIs by which it should be initialized are deprecated,
+        // however they still exist are are in-use. Neverthless, it is used to return CUDAAllocator,
+        // hence we must try to initialize it here if we can since FromProviderOptions might contain
+        // external CUDA allocator.
         external_allocator_info = info.external_allocator_info;
         return cuda_provider_info->CreateExecutionProviderFactory(info)->CreateProvider();
       } else {
         if (!Env::Default().GetEnvironmentVar("CUDA_PATH").empty()) {
-          ORT_THROW("CUDA_PATH is set but CUDA wasn't able to be loaded. Please install the correct version of CUDA and cuDNN as mentioned in the GPU requirements page (https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements), make sure they're in the PATH, and that your GPU is supported.");
+          ORT_THROW("CUDA_PATH is set but CUDA wasn't able to be loaded. Please install the correct version of CUDA and
+                    cuDNN as mentioned in the GPU requirements page
+                    (https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements),
+                    make sure they're in the PATH, and that your GPU is supported.");
         }
       }
     }
-    LOGS_DEFAULT(WARNING) << "Failed to create " << type << ". Please reference https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements to ensure all dependencies are met.";
+    LOGS_DEFAULT(WARNING) << "Failed to create "
+                          << type
+                          << ". Please reference
+                          https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements
+                          to ensure all dependencies are met.";
 #endif
   } else if (type == kRocmExecutionProvider) {
 #ifdef USE_ROCM
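A side note on the string splitting used in the MIGraphX hunks above: a trailing backslash splices the next physical source line into the literal before it is parsed, so any indentation in front of 'True' becomes part of the thrown message. Adjacent string-literal concatenation avoids that. Below is a minimal standalone sketch of the difference; the key name and message text are illustrative, not taken from the file.

#include <iostream>
#include <string>

int main() {
  // Line splicing: the backslash-newline pair is removed before the literal
  // is parsed, so the indentation of the second line ends up inside the string.
  const std::string spliced = "The value for the key 'example_flag' should be \
                               'True' or 'False'.";

  // Adjacent string literals are concatenated by the compiler; the
  // indentation stays outside the quotes and the message comes out clean.
  const std::string concatenated =
      "The value for the key 'example_flag' should be "
      "'True' or 'False'.";

  std::cout << spliced << "\n";       // note the run of spaces before 'True'
  std::cout << concatenated << "\n";  // single clean sentence
  return 0;
}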
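The CUDA hunk also shows the load-gating logic: CUDA is skipped entirely when ORT_CUDA_UNAVAILABLE is set, a failed load while CUDA_PATH is defined becomes a hard error, and every other failure falls through to a warning so another provider can be used. A rough standalone sketch of that decision flow follows, using std::getenv in place of onnxruntime's Env helper; try_load_cuda and the messages are illustrative stand-ins, not the file's actual API.

#include <cstdlib>
#include <iostream>
#include <stdexcept>

// Stub standing in for the real provider-library load; assume it reports
// whether the CUDA runtime libraries could actually be loaded.
static bool try_load_cuda() { return false; }

// True if the environment variable exists and is non-empty.
static bool env_set(const char* name) {
  const char* value = std::getenv(name);
  return value != nullptr && value[0] != '\0';
}

int main() {
  if (!env_set("ORT_CUDA_UNAVAILABLE")) {  // only touch CUDA if not vetoed
    if (try_load_cuda()) {
      std::cout << "CUDA execution provider created\n";
      return 0;
    }
    if (env_set("CUDA_PATH")) {
      // CUDA_PATH points at an installation, yet loading failed: hard error.
      throw std::runtime_error("CUDA_PATH is set but CUDA could not be loaded");
    }
  }
  // Vetoed or silently failed to load: warn and let the caller fall back.
  std::cerr << "Failed to create CUDAExecutionProvider; check requirements\n";
  return 1;
}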
