# some useful environment variables:
#
# TORCH_CUDA_ARCH_LIST
# specify which CUDA architectures to build for
#
# IGNORE_TORCH_VER
# ignore version requirements for PyTorch
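#
# FORCE_CUDA
# set to 1 to build the CUDA extensions even when no GPU is visible,
# e.g. for cross-compilation inside a docker build (see the checks below)
#
# A typical invocation combining these might look like (example only, assumed):
#   TORCH_CUDA_ARCH_LIST="8.6" IGNORE_TORCH_VER=1 python setup.py install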
import os
from setuptools import setup, find_packages, dist
import importlib
from pkg_resources import parse_version
import subprocess
import warnings
TORCH_MIN_VER = '1.6.0'
TORCH_MAX_VER = '2.5.1'
CYTHON_MIN_VER = '0.29.37'
IGNORE_TORCH_VER = os.getenv('IGNORE_TORCH_VER') is not None
# Module required before installation:
# trying to install it ahead of time turned out to be too unstable.
torch_spec = importlib.util.find_spec("torch")
if torch_spec is None:
    raise ImportError(
        f"Kaolin requires PyTorch >={TORCH_MIN_VER}, <={TORCH_MAX_VER}, "
        "but couldn't find the module installed."
    )
else:
    import torch
    torch_ver = parse_version(parse_version(torch.__version__).base_version)
    if (torch_ver < parse_version(TORCH_MIN_VER) or
            torch_ver > parse_version(TORCH_MAX_VER)):
        if IGNORE_TORCH_VER:
            warnings.warn(
                f'Kaolin is compatible with PyTorch >={TORCH_MIN_VER}, <={TORCH_MAX_VER}, '
                f'but found version {torch.__version__}. Continuing with the installed '
                'version as IGNORE_TORCH_VER is set.'
            )
        else:
            raise ImportError(
                f'Kaolin requires PyTorch >={TORCH_MIN_VER}, <={TORCH_MAX_VER}, '
                f'but found version {torch.__version__} instead. '
                'If you wish to install with this specific version, set IGNORE_TORCH_VER=1.'
            )
missing_modules = []
cython_spec = importlib.util.find_spec("cython")
if cython_spec is None:
    warnings.warn(
        f"Kaolin requires cython == {CYTHON_MIN_VER}, "
        "but couldn't find the module installed. "
        "This setup will try to install it..."
    )
    missing_modules.append(f'cython=={CYTHON_MIN_VER}')
else:
    import Cython
    cython_ver = parse_version(Cython.__version__)
    if cython_ver != parse_version(CYTHON_MIN_VER):
        warnings.warn(f'Kaolin requires cython == {CYTHON_MIN_VER}, '
                      f'but found version {Cython.__version__} instead.')
numpy_spec = importlib.util.find_spec("numpy")
if numpy_spec is None:
    warnings.warn(
        "Kaolin requires numpy, but couldn't find the module installed. "
        "This setup will try to install it..."
    )
    missing_modules.append('numpy')
dist.Distribution().fetch_build_eggs(missing_modules)
cython_spec = importlib.util.find_spec("cython")
if cython_spec is None:
    raise ImportError(
        f"Kaolin requires cython == {CYTHON_MIN_VER} "
        "but couldn't find or install it."
    )
numpy_spec = importlib.util.find_spec("numpy")
if numpy_spec is None:
    raise ImportError(
        "Kaolin requires numpy but couldn't find or install it."
    )
import os
import sys
import logging
import glob
import numpy
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
cwd = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger()
logging.basicConfig(format='%(levelname)s - %(message)s')
def get_cuda_bare_metal_version(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    bare_metal_minor = release[1][0]
    return raw_output, bare_metal_major, bare_metal_minor
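# For reference, `nvcc -V` typically ends with a line such as
# "Cuda compilation tools, release 11.8, V11.8.89", from which the parsing
# above would return major "11" and minor "8" (example output is assumed,
# not captured from this environment).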
if not torch.cuda.is_available():
    if os.getenv('FORCE_CUDA', '0') == '1':
        # From: https://github.com/NVIDIA/apex/blob/c4e85f7bf144cb0e368da96d339a6cbd9882cea5/setup.py
        # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query
        # torch.cuda.get_device_capability(), which will fail if you are compiling in an
        # environment without visible GPUs (e.g. during an nvidia-docker build command).
        logging.warning(
            "Torch did not find available GPUs on this system.\n"
            "If your intention is to cross-compile, this is not an error.\n"
            "By default, Kaolin will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
            "Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
            "and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0),\n"
            "and, if the CUDA version is >= 11.8, Hopper (compute capability 9.0).\n"
            "If you wish to cross-compile for a single specific architecture,\n"
            'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n'
        )
        if os.getenv("TORCH_CUDA_ARCH_LIST", None) is None:
            _, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
            if int(bare_metal_major) == 11:
                if int(bare_metal_minor) == 0:
                    os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
                elif int(bare_metal_minor) < 8:
                    os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
                else:
                    os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;8.9;9.0"
            elif int(bare_metal_major) == 12:
                os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
            else:
                os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
        print(f'TORCH_CUDA_ARCH_LIST: {os.environ["TORCH_CUDA_ARCH_LIST"]}')
    else:
        logging.warning(
            "Torch did not find available GPUs on this system.\n"
            "Kaolin will install only with CPU support and will have very limited features.\n"
            "If you wish to cross-compile for GPU, `export FORCE_CUDA=1` before running setup.py."
        )
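# Note: the TORCH_CUDA_ARCH_LIST set above is consumed by torch.utils.cpp_extension
# (standard PyTorch behavior) to decide which SM architectures the CUDA sources are
# compiled for; with a visible GPU, PyTorch autodetects the architecture instead.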
PACKAGE_NAME = 'kaolin'
DESCRIPTION = 'Kaolin: A PyTorch library for accelerating 3D deep learning research'
URL = 'https://github.com/NVIDIAGameWorks/kaolin'
AUTHOR = 'NVIDIA'
LICENSE = 'Apache License 2.0'
DOWNLOAD_URL = ''
LONG_DESCRIPTION = """
Kaolin is a PyTorch library aiming to accelerate 3D deep learning research. Kaolin provides efficient implementations
of differentiable 3D modules for use in deep learning systems. With functionality to load and preprocess several popular
3D datasets, and native functions to manipulate meshes, pointclouds, signed distance functions, and voxel grids, Kaolin
mitigates the need to write wasteful boilerplate code. Kaolin packages together several differentiable graphics modules
including rendering, lighting, shading, and view warping. Kaolin also supports an array of loss functions and evaluation
metrics for seamless evaluation and provides visualization functionality to render the 3D results. Importantly, we curate
a comprehensive model zoo comprising many state-of-the-art 3D deep learning architectures, to serve as a starting point
for future research endeavours.
"""
version_txt = os.path.join(cwd, 'version.txt')
with open(version_txt) as f:
    version = f.readline().strip()
def write_version_file():
    version_path = os.path.join(cwd, 'kaolin', 'version.py')
    with open(version_path, 'w') as f:
        f.write("__version__ = '{}'\n".format(version))
write_version_file()
def get_requirements():
    requirements = []
    if sys.version_info >= (3, 10):
        warnings.warn("usd-core is not compatible with python_version >= 3.10 "
                      "and won't be installed, please use a supported python_version "
                      "to use USD-related features")
    with open(os.path.join(cwd, 'tools', 'viz_requirements.txt'), 'r') as f:
        for line in f.readlines():
            requirements.append(line.strip())
    with open(os.path.join(cwd, 'tools', 'requirements.txt'), 'r') as f:
        for line in f.readlines():
            requirements.append(line.strip())
    return requirements
def get_scripts():
    return ['kaolin/experimental/dash3d/kaolin-dash3d']
def get_extensions():
    extra_compile_args = {'cxx': ['-O3']}
    define_macros = []
    include_dirs = []
    sources = glob.glob('kaolin/csrc/**/*.cpp', recursive=True)
    # FORCE_CUDA is for cross-compilation in docker build
    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        with_cuda = True
        define_macros += [("WITH_CUDA", None), ("THRUST_IGNORE_CUB_VERSION_CHECK", None)]
        sources += glob.glob('kaolin/csrc/**/*.cu', recursive=True)
        extension = CUDAExtension
        extra_compile_args.update({'nvcc': [
            '-O3',
            '-DWITH_CUDA',
            '-DTHRUST_IGNORE_CUB_VERSION_CHECK'
        ]})
        include_dirs = get_include_dirs()
    else:
        extension = CppExtension
        with_cuda = False
    extensions = []
    extensions.append(
        extension(
            name='kaolin._C',
            sources=sources,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            include_dirs=include_dirs
        )
    )
    # use cudart_static instead
    for extension in extensions:
        extension.libraries = ['cudart_static' if x == 'cudart' else x
                               for x in extension.libraries]
    use_cython = True
    ext = '.pyx' if use_cython else '.cpp'
    cython_extensions = [
        CppExtension(
            'kaolin.ops.mesh.triangle_hash',
            sources=[
                f'kaolin/cython/ops/mesh/triangle_hash{ext}'
            ],
            include_dirs=[numpy.get_include()],
        ),
        CppExtension(
            'kaolin.ops.conversions.mise',
            sources=[
                f'kaolin/cython/ops/conversions/mise{ext}'
            ],
        ),
    ]
    if use_cython:
        from Cython.Build import cythonize
        from Cython.Compiler import Options
        compiler_directives = Options.get_directive_defaults()
        compiler_directives["emit_code_comments"] = False
        cython_extensions = cythonize(cython_extensions, language='c++',
                                      compiler_directives=compiler_directives)
    return extensions + cython_extensions
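# CUB has shipped with the CUDA Toolkit since version 11.0, which is presumably
# why the helper below only adds the vendored third_party/cub headers for older
# toolkits (or whatever directory CUB_HOME points to, when it is set).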
def get_include_dirs():
    include_dirs = []
    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        _, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
        if "CUB_HOME" in os.environ:
            logging.warning(f'Including CUB_HOME ({os.environ["CUB_HOME"]}).')
            include_dirs.append(os.environ["CUB_HOME"])
        else:
            if int(bare_metal_major) < 11:
                logging.warning(f'Including default CUB_HOME ({os.path.join(cwd, "third_party/cub")}).')
                include_dirs.append(os.path.join(cwd, 'third_party/cub'))
    return include_dirs
if __name__ == '__main__':
    setup(
        # Metadata
        name=PACKAGE_NAME,
        version=version,
        author=AUTHOR,
        description=DESCRIPTION,
        url=URL,
        long_description=LONG_DESCRIPTION,
        license=LICENSE,
        python_requires='~=3.7',
        # Package info
        packages=find_packages(exclude=('docs', 'tests', 'examples')),
        scripts=get_scripts(),
        include_package_data=True,
        install_requires=get_requirements(),
        zip_safe=False,
        ext_modules=get_extensions(),
        cmdclass={
            'build_ext': BuildExtension.with_options(no_python_abi_suffix=True)
        }
    )