From aef98d31b79a31290d173bff57fcdc66ed0f3370 Mon Sep 17 00:00:00 2001
From: Tianlei Wu
Date: Fri, 19 Jul 2024 18:30:38 -0700
Subject: [PATCH 1/2] Fix cuda fallback

---
 .../onnxruntime_inference_collection.py      | 16 +++++++++---
 .../python/onnxruntime_pybind_state.cc       | 26 ++++++++++---------
 2 files changed, 26 insertions(+), 16 deletions(-)

diff --git a/onnxruntime/python/onnxruntime_inference_collection.py b/onnxruntime/python/onnxruntime_inference_collection.py
index ecae280e92ae5..c3cfe2c97ae95 100644
--- a/onnxruntime/python/onnxruntime_inference_collection.py
+++ b/onnxruntime/python/onnxruntime_inference_collection.py
@@ -438,10 +438,18 @@ def _create_inference_session(self, providers, provider_options, disabled_optimi
 
         # Tensorrt can fall back to CUDA if it's explicitly assigned. All others fall back to CPU.
         if "TensorrtExecutionProvider" in available_providers:
-            if providers and any(
-                provider == "CUDAExecutionProvider"
-                or (isinstance(provider, tuple) and provider[0] == "CUDAExecutionProvider")
-                for provider in providers
+            if (
+                providers
+                and any(
+                    provider == "CUDAExecutionProvider"
+                    or (isinstance(provider, tuple) and provider[0] == "CUDAExecutionProvider")
+                    for provider in providers
+                )
+                and any(
+                    provider == "TensorrtExecutionProvider"
+                    or (isinstance(provider, tuple) and provider[0] == "TensorrtExecutionProvider")
+                    for provider in providers
+                )
             ):
                 self._fallback_providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
             else:
diff --git a/onnxruntime/python/onnxruntime_pybind_state.cc b/onnxruntime/python/onnxruntime_pybind_state.cc
index e13285c60e69f..1c23ffa8289be 100644
--- a/onnxruntime/python/onnxruntime_pybind_state.cc
+++ b/onnxruntime/python/onnxruntime_pybind_state.cc
@@ -35,6 +35,11 @@
 #include "contrib_ops/cpu/aten_ops/aten_op_executor.h"
 #endif
 
+#ifdef USE_CUDA
+#include <cuda.h>   // for CUDA_VERSION
+#include <cudnn.h>  // for CUDNN_MAJOR
+#endif
+
 #include <pybind11/functional.h>
 
 // Explicitly provide a definition for the static const var 'GPU' in the OrtDevice struct,
@@ -946,26 +951,23 @@ std::unique_ptr<IExecutionProvider> CreateExecutionProviderInstance(
                                                                             provider_options_map);
 
         // This variable is never initialized because the APIs by which it should be initialized are deprecated,
-        // however they still exist are are in-use. Neverthless, it is used to return CUDAAllocator,
+        // however they still exist and are in-use. Nevertheless, it is used to return CUDAAllocator,
         // hence we must try to initialize it here if we can since FromProviderOptions might contain
         // external CUDA allocator.
         external_allocator_info = info.external_allocator_info;
         return cuda_provider_info->CreateExecutionProviderFactory(info)->CreateProvider();
-      } else {
-        if (!Env::Default().GetEnvironmentVar("CUDA_PATH").empty()) {
-          ORT_THROW(
-              "CUDA_PATH is set but CUDA wasnt able to be loaded. Please install the correct version of CUDA and"
-              "cuDNN as mentioned in the GPU requirements page "
-              " (https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements), "
-              " make sure they're in the PATH, and that your GPU is supported.");
-        }
       }
     }
 
     LOGS_DEFAULT(WARNING) << "Failed to create " << type
-                          << ". Please reference "
-                          << "https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements"
-                          << "to ensure all dependencies are met.";
+                          << ". Requires cuDNN " << CUDNN_MAJOR << ".* and "
+                          << "CUDA " << (CUDA_VERSION / 1000) << ".*"
+#if defined(_MSC_VER)
+                          << ", and the latest MSVC runtime"
+#endif
+                          << ". Please install all dependencies as mentioned in the GPU requirements page"
+                             " (https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements), "
+                             "make sure they're in the PATH, and that your GPU is supported.";
 #endif
   } else if (type == kRocmExecutionProvider) {
 #ifdef USE_ROCM

From 2d18ce5120793b3fdc7506d2ed08b9af6dcdbe50 Mon Sep 17 00:00:00 2001
From: Tianlei Wu
Date: Sat, 20 Jul 2024 20:46:06 +0000
Subject: [PATCH 2/2] include directory of cuda.h

---
 cmake/onnxruntime_python.cmake | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/cmake/onnxruntime_python.cmake b/cmake/onnxruntime_python.cmake
index 07c65e7986b05..270139ceaff7b 100644
--- a/cmake/onnxruntime_python.cmake
+++ b/cmake/onnxruntime_python.cmake
@@ -97,8 +97,12 @@ endif()
 
 onnxruntime_add_include_to_target(onnxruntime_pybind11_state Python::Module Python::NumPy)
 target_include_directories(onnxruntime_pybind11_state PRIVATE ${ONNXRUNTIME_ROOT} ${pybind11_INCLUDE_DIRS})
-if(onnxruntime_USE_CUDA AND onnxruntime_CUDNN_HOME)
-  target_include_directories(onnxruntime_pybind11_state PRIVATE ${onnxruntime_CUDNN_HOME}/include)
+if(onnxruntime_USE_CUDA)
+  target_include_directories(onnxruntime_pybind11_state PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
+  # cudnn_home is optional on Windows when CUDA and cuDNN are installed in the same directory.
+  if(onnxruntime_CUDNN_HOME)
+    target_include_directories(onnxruntime_pybind11_state PRIVATE ${onnxruntime_CUDNN_HOME}/include)
+  endif()
 endif()
 if(onnxruntime_USE_CANN)
   target_include_directories(onnxruntime_pybind11_state PRIVATE ${onnxruntime_CANN_HOME}/include)
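
Note: below is a minimal sketch of the fixed fallback rule from PATCH 1/2, restated
outside the InferenceSession class for illustration. The names `requested` and
`fallback_providers` are hypothetical, not part of the onnxruntime API, and the
sketch covers only the TensorRT branch; the real method also picks fallbacks for
other execution providers. As in the patched code, entries of `providers` may be
plain strings or (name, options) tuples.

    def requested(name, providers):
        # True when `name` was explicitly requested, either as a bare string
        # or as the first element of a (name, options) tuple.
        return any(p == name or (isinstance(p, tuple) and p[0] == name) for p in providers)

    def fallback_providers(available_providers, providers):
        # TensorRT is the only provider that falls back to CUDA, so CUDA is a
        # valid fallback only when BOTH TensorrtExecutionProvider and
        # CUDAExecutionProvider were explicitly requested. Before the fix,
        # requesting CUDA alone (without TensorRT) was enough to keep CUDA
        # in the fallback list.
        if (
            "TensorrtExecutionProvider" in available_providers
            and providers
            and requested("CUDAExecutionProvider", providers)
            and requested("TensorrtExecutionProvider", providers)
        ):
            return ["CUDAExecutionProvider", "CPUExecutionProvider"]
        return ["CPUExecutionProvider"]

For example, with TensorRT available but only ["CUDAExecutionProvider"] requested,
this now yields ["CPUExecutionProvider"], whereas the pre-patch logic would also
have kept CUDA as a fallback.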