diff --git a/tools/dynamo/verify_dynamo.py b/tools/dynamo/verify_dynamo.py
index 364773b7e1884d..ba2e59e557e224 100644
--- a/tools/dynamo/verify_dynamo.py
+++ b/tools/dynamo/verify_dynamo.py
@@ -5,10 +5,8 @@
 import traceback
 import warnings
 
-from pkg_resources import packaging
-
-MIN_CUDA_VERSION = packaging.version.parse("11.6")
-MIN_ROCM_VERSION = packaging.version.parse("5.4")
+MIN_CUDA_VERSION = "11.6"
+MIN_ROCM_VERSION = "5.4"
 MIN_PYTHON_VERSION = (3, 8)
 
 
@@ -28,11 +26,12 @@ def check_python():
 def check_torch():
     import torch
 
-    return packaging.version.parse(torch.__version__)
+    return torch.__version__
 
 
 # based on torch/utils/cpp_extension.py
 def get_cuda_version():
+    from torch.torch_version import TorchVersion
     from torch.utils import cpp_extension
 
     CUDA_HOME = cpp_extension._find_cuda_home()
@@ -50,10 +49,11 @@ def get_cuda_version():
         raise VerifyDynamoError("CUDA version not found in `nvcc --version` output")
 
     cuda_str_version = cuda_version.group(1)
-    return packaging.version.parse(cuda_str_version)
+    return TorchVersion(cuda_str_version)
 
 
 def get_rocm_version():
+    from torch.torch_version import TorchVersion
     from torch.utils import cpp_extension
 
     ROCM_HOME = cpp_extension._find_rocm_home()
@@ -75,16 +75,17 @@ def get_rocm_version():
 
     hip_str_version = hip_version.group(1)
 
-    return packaging.version.parse(hip_str_version)
+    return TorchVersion(hip_str_version)
 
 
 def check_cuda():
     import torch
+    from torch.torch_version import TorchVersion
 
     if not torch.cuda.is_available() or torch.version.hip is not None:
         return None
 
-    torch_cuda_ver = packaging.version.parse(torch.version.cuda)
+    torch_cuda_ver = TorchVersion(torch.version.cuda)
 
     # check if torch cuda version matches system cuda version
     cuda_ver = get_cuda_version()
@@ -112,14 +113,13 @@ def check_cuda():
 
 def check_rocm():
     import torch
+    from torch.torch_version import TorchVersion
 
     if not torch.cuda.is_available() or torch.version.hip is None:
         return None
 
     # Extracts main ROCm version from full string
-    torch_rocm_ver = packaging.version.parse(
-        ".".join(list(torch.version.hip.split(".")[0:2]))
-    )
+    torch_rocm_ver = TorchVersion(".".join(list(torch.version.hip.split(".")[0:2])))
 
     # check if torch rocm version matches system rocm version
     rocm_ver = get_rocm_version()
diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py
index 34bebe6af5fe8e..9cf602206bf8e2 100644
--- a/torch/utils/cpp_extension.py
+++ b/torch/utils/cpp_extension.py
@@ -25,7 +25,6 @@
 from torch.torch_version import TorchVersion
 
 from setuptools.command.build_ext import build_ext
-from pkg_resources import packaging  # type: ignore[attr-defined]
 
 IS_WINDOWS = sys.platform == 'win32'
 IS_MACOS = sys.platform.startswith('darwin')
@@ -402,9 +401,11 @@ def _check_cuda_version(compiler_name: str, compiler_version: TorchVersion) -> N
     if cuda_version is None:
         return
 
-    cuda_str_version = cuda_version.group(1)
-    cuda_ver = packaging.version.parse(cuda_str_version)
-    torch_cuda_version = packaging.version.parse(torch.version.cuda)
+    cuda_ver = cuda_str_version = cuda_version.group(1)
+    if torch.version.cuda is None:
+        return
+
+    torch_cuda_version = TorchVersion(torch.version.cuda)
     if cuda_ver != torch_cuda_version:
         # major/minor attributes are only available in setuptools>=49.4.0
         if getattr(cuda_ver, "major", None) is None:
@@ -1133,7 +1134,7 @@ def CUDAExtension(name, sources, *args, **kwargs):
         extra_compile_args_dlink += [f'-L{x}' for x in library_dirs]
         extra_compile_args_dlink += [f'-l{x}' for x in dlink_libraries]
 
-        if (torch.version.cuda is not None) and packaging.version.parse(torch.version.cuda) >= packaging.version.parse('11.2'):
+        if (torch.version.cuda is not None) and TorchVersion(torch.version.cuda) >= '11.2':
             extra_compile_args_dlink += ['-dlto']  # Device Link Time Optimization started from cuda 11.2
 
         extra_compile_args['nvcc_dlink'] = extra_compile_args_dlink
@@ -1449,7 +1450,9 @@ def build_precompile_header(pch_cmd):
             raise RuntimeError(f"Compile PreCompile Header fail, command: {pch_cmd}") from e
 
     extra_cflags_str = listToString(extra_cflags)
-    extra_include_paths_str = " ".join([f'-I{include}' for include in extra_include_paths])
+    extra_include_paths_str = (
+        "" if extra_include_paths is None else " ".join([f"-I{include}" for include in extra_include_paths])
+    )
 
     lib_include = os.path.join(_TORCH_PATH, 'include')
     torch_include_dirs = [
@@ -2344,9 +2347,8 @@ def sanitize_flags(flags):
         # --generate-dependencies-with-compile was added in CUDA 10.2.
        # Compilation will work on earlier CUDA versions but header file
         # dependencies are not correctly computed.
-        required_cuda_version = packaging.version.parse('11.0')
-        has_cuda_version = torch.version.cuda is not None
-        if has_cuda_version and packaging.version.parse(torch.version.cuda) >= required_cuda_version:
+        required_cuda_version = '11.0'
+        if torch.version.cuda is not None and TorchVersion(torch.version.cuda) >= required_cuda_version:
            cuda_compile_rule.append('  depfile = $out.d')
            cuda_compile_rule.append('  deps = gcc')
            # Note: non-system deps with nvcc are only supported
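
Note on the comparisons above: the patch relies on TorchVersion comparing like a parsed version rather than a plain string, so expressions such as TorchVersion(torch.version.cuda) >= '11.2' order numerically. The following minimal sketch (illustrative only, not part of the patch; it assumes a PyTorch build that ships torch.torch_version) shows that behavior:

    # Illustrative sketch of TorchVersion comparison semantics.
    from torch.torch_version import TorchVersion

    assert TorchVersion("11.2") >= "11.2"
    # Plain string comparison would give "11.10" < "11.2";
    # version-aware comparison orders 11.10 after 11.2.
    assert TorchVersion("11.10") >= "11.2"
    assert not (TorchVersion("10.2") >= "11.2")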