From c003730c7541f09ffaed9890ca766fedf4b971be Mon Sep 17 00:00:00 2001
From: Tianlei Wu <tlwu@microsoft.com>
Date: Sun, 4 Feb 2024 00:24:37 +0000
Subject: [PATCH] fix cuda/rocm provider info hash

---
 .../cuda/cuda_execution_provider_info.h       | 39 +++++++++++++++----
 .../rocm/rocm_execution_provider_info.h       | 34 ++++++++++++----
 .../test/python/onnxruntime_test_python.py    |  4 ++
 .../python/orttraining_python_module.cc       | 20 ++--------
 4 files changed, 67 insertions(+), 30 deletions(-)

diff --git a/onnxruntime/core/providers/cuda/cuda_execution_provider_info.h b/onnxruntime/core/providers/cuda/cuda_execution_provider_info.h
index fc7f501d9299b..1cac3d1513698 100644
--- a/onnxruntime/core/providers/cuda/cuda_execution_provider_info.h
+++ b/onnxruntime/core/providers/cuda/cuda_execution_provider_info.h
@@ -86,12 +86,37 @@ struct CUDAExecutionProviderInfo {
 }  // namespace onnxruntime
 
 template <>
-struct std::hash<::onnxruntime::cuda::TunableOpInfo> {
-  size_t operator()(const ::onnxruntime::cuda::TunableOpInfo& info) const {
-    size_t seed_and_value{0xbc9f1d34};
-    onnxruntime::HashCombine(info.enable, seed_and_value);
-    onnxruntime::HashCombine(info.tuning_enable, seed_and_value);
-    onnxruntime::HashCombine(info.max_tuning_duration_ms, seed_and_value);
-    return seed_and_value;
+struct std::hash<::onnxruntime::CUDAExecutionProviderInfo> {
+  size_t operator()(const ::onnxruntime::CUDAExecutionProviderInfo& info) const {
+    size_t value{0xbc9f1d34};  // seed
+
+    // Bits: device_id (16), arena_extend_strategy/cudnn_conv_algo_search (reserved 2), boolean options (1 each)
+    size_t data = static_cast<size_t>(info.device_id) ^
+                  (static_cast<size_t>(info.arena_extend_strategy) << 16) ^
+                  (static_cast<size_t>(info.cudnn_conv_algo_search) << 18) ^
+                  (static_cast<size_t>(info.do_copy_in_default_stream) << 20) ^
+                  (static_cast<size_t>(info.has_user_compute_stream) << 21) ^
+                  (static_cast<size_t>(info.cudnn_conv_use_max_workspace) << 22) ^
+                  (static_cast<size_t>(info.enable_cuda_graph) << 23) ^
+                  (static_cast<size_t>(info.tunable_op.enable) << 24) ^
+                  (static_cast<size_t>(info.tunable_op.tuning_enable) << 25) ^
+                  (static_cast<size_t>(info.cudnn_conv1d_pad_to_nc1d) << 26) ^
+                  (static_cast<size_t>(info.enable_skip_layer_norm_strict_mode) << 27) ^
+                  (static_cast<size_t>(info.prefer_nhwc) << 28) ^
+                  (static_cast<size_t>(info.use_ep_level_unified_stream) << 29) ^
+                  (static_cast<size_t>(info.use_tf32) << 30);
+    onnxruntime::HashCombine(data, value);
+
+    onnxruntime::HashCombine(info.gpu_mem_limit, value);
+    onnxruntime::HashCombine(info.tunable_op.max_tuning_duration_ms, value);
+
+    // Memory pointers
+    onnxruntime::HashCombine(reinterpret_cast<size_t>(info.user_compute_stream), value);
+    onnxruntime::HashCombine(reinterpret_cast<size_t>(info.external_allocator_info.alloc), value);
+    onnxruntime::HashCombine(reinterpret_cast<size_t>(info.external_allocator_info.free), value);
+    onnxruntime::HashCombine(reinterpret_cast<size_t>(info.external_allocator_info.empty_cache), value);
+
+    // The default memory arena cfg is not used in hashing right now.
+    return value;
   }
 };
diff --git a/onnxruntime/core/providers/rocm/rocm_execution_provider_info.h b/onnxruntime/core/providers/rocm/rocm_execution_provider_info.h
index 2f549cc1ac143..c245b18057ca7 100644
--- a/onnxruntime/core/providers/rocm/rocm_execution_provider_info.h
+++ b/onnxruntime/core/providers/rocm/rocm_execution_provider_info.h
@@ -74,12 +74,32 @@ struct ROCMExecutionProviderInfo {
 }  // namespace onnxruntime
 
 template <>
-struct std::hash<::onnxruntime::rocm::TunableOpInfo> {
-  size_t operator()(const ::onnxruntime::rocm::TunableOpInfo& info) const {
-    size_t seed_and_value{0xbc9f1d34};
-    onnxruntime::HashCombine(info.enable, seed_and_value);
-    onnxruntime::HashCombine(info.tuning_enable, seed_and_value);
-    onnxruntime::HashCombine(info.max_tuning_duration_ms, seed_and_value);
-    return seed_and_value;
+struct std::hash<::onnxruntime::ROCMExecutionProviderInfo> {
+  size_t operator()(const ::onnxruntime::ROCMExecutionProviderInfo& info) const {
+    size_t value{0xbc9f1d34};  // seed
+
+    // Bits: device_id (16), arena_extend_strategy/miopen_conv_exhaustive_search (reserved 2), boolean options (1 each)
+    size_t data = static_cast<size_t>(info.device_id) ^
+                  (static_cast<size_t>(info.arena_extend_strategy) << 16) ^
+                  (static_cast<size_t>(info.miopen_conv_exhaustive_search) << 18) ^
+                  (static_cast<size_t>(info.do_copy_in_default_stream) << 20) ^
+                  (static_cast<size_t>(info.has_user_compute_stream) << 21) ^
+                  (static_cast<size_t>(info.miopen_conv_use_max_workspace) << 22) ^
+                  (static_cast<size_t>(info.enable_hip_graph) << 23) ^
+                  (static_cast<size_t>(info.tunable_op.enable) << 24) ^
+                  (static_cast<size_t>(info.tunable_op.tuning_enable) << 25);
+    onnxruntime::HashCombine(data, value);
+
+    onnxruntime::HashCombine(info.gpu_mem_limit, value);
+    onnxruntime::HashCombine(info.tunable_op.max_tuning_duration_ms, value);
+
+    // Memory pointers
+    onnxruntime::HashCombine(reinterpret_cast<size_t>(info.user_compute_stream), value);
+    onnxruntime::HashCombine(reinterpret_cast<size_t>(info.external_allocator_info.alloc), value);
+    onnxruntime::HashCombine(reinterpret_cast<size_t>(info.external_allocator_info.free), value);
+    onnxruntime::HashCombine(reinterpret_cast<size_t>(info.external_allocator_info.empty_cache), value);
+
+    // The default memory arena cfg is not used in hashing right now.
+    return value;
   }
 };
diff --git a/onnxruntime/test/python/onnxruntime_test_python.py b/onnxruntime/test/python/onnxruntime_test_python.py
index ae2145908de07..91b6c71e735a8 100644
--- a/onnxruntime/test/python/onnxruntime_test_python.py
+++ b/onnxruntime/test/python/onnxruntime_test_python.py
@@ -414,6 +414,8 @@ def test_get_and_set_option_with_values(option_name, option_values):
                 str(option_value),
             )
 
+        test_get_and_set_option_with_values("enable_cuda_graph", ["1", "0"])
+
         test_get_and_set_option_with_values("arena_extend_strategy", ["kNextPowerOfTwo", "kSameAsRequested"])
 
         test_get_and_set_option_with_values("cudnn_conv_algo_search", ["DEFAULT", "EXHAUSTIVE", "HEURISTIC"])
@@ -555,6 +557,8 @@ def test_get_and_set_option_with_values(option_name, option_values):
 
         test_get_and_set_option_with_values("tunable_op_max_tuning_duration_ms", ["-1", "1"])
 
+        test_get_and_set_option_with_values("enable_hip_graph", ["1", "0"])
+
         run_rocm_options_test()
 
     def test_invalid_set_providers(self):
diff --git a/orttraining/orttraining/python/orttraining_python_module.cc b/orttraining/orttraining/python/orttraining_python_module.cc
index 55cd2af2d0219..b0d1ed50af126 100644
--- a/orttraining/orttraining/python/orttraining_python_module.cc
+++ b/orttraining/orttraining/python/orttraining_python_module.cc
@@ -47,7 +47,7 @@ void addObjectMethodsForLazyTensor(py::module& m);
 #endif
 
 bool InitArray();
-bool GetDyanmicExecutionProviderHash(
+bool GetDynamicExecutionProviderHash(
     const std::string& ep_shared_lib_path,
     const ProviderOptions& provider_options,
     size_t& hash,
@@ -87,13 +87,7 @@ bool GetProviderInstanceHash(const std::string& type,
     if (auto* cuda_provider_info = TryGetProviderInfo_CUDA()) {
       const CUDAExecutionProviderInfo info =
           GetCudaExecutionProviderInfo(cuda_provider_info, provider_options_map);
-      hash = static_cast<size_t>(info.device_id) ^
-             info.gpu_mem_limit ^
-             (static_cast<size_t>(info.arena_extend_strategy) << 16) ^
-             (static_cast<size_t>(info.cudnn_conv_algo_search) << 18) ^
-             (static_cast<size_t>(info.do_copy_in_default_stream) << 20) ^
-             (static_cast<size_t>(info.has_user_compute_stream) << 22) ^
-             std::hash<cuda::TunableOpInfo>{}(info.tunable_op);
+      hash = std::hash<CUDAExecutionProviderInfo>{}(info);
       return true;
     }
 #endif
@@ -102,13 +96,7 @@ bool GetProviderInstanceHash(const std::string& type,
     if (auto* rocm_provider_info = TryGetProviderInfo_ROCM()) {
       const ROCMExecutionProviderInfo info =
           GetRocmExecutionProviderInfo(rocm_provider_info, provider_options_map);
-      hash = static_cast<size_t>(info.device_id) ^
-             info.gpu_mem_limit ^
-             (static_cast<size_t>(info.arena_extend_strategy) << 16) ^
-             (static_cast<size_t>(info.miopen_conv_exhaustive_search) << 18) ^
-             (static_cast<size_t>(info.do_copy_in_default_stream) << 20) ^
-             (static_cast<size_t>(info.has_user_compute_stream) << 22) ^
-             std::hash<rocm::TunableOpInfo>{}(info.tunable_op);
+      hash = std::hash<ROCMExecutionProviderInfo>{}(info);
       return true;
     }
 #endif
@@ -128,7 +116,7 @@ bool GetProviderInstanceHash(const std::string& type,
         provider_options.insert(option);
       }
     }
-    return GetDyanmicExecutionProviderHash(shared_lib_path_it->second, provider_options, hash);
+    return GetDynamicExecutionProviderHash(shared_lib_path_it->second, provider_options, hash);
   }
 }