Update CPU-only support for building the project #1214

Open. Wants to merge 1 commit into base: master.

6 changes: 4 additions & 2 deletions alphapose/utils/roi_align/roi_align.py
@@ -3,8 +3,10 @@
 from torch.autograd.function import once_differentiable
 from torch.nn.modules.utils import _pair

-from . import roi_align_cuda
-
+try:
+    from . import roi_align_cuda
+except ImportError:
+    print("Import roi_align_cuda module failed, skipping...")

 class RoIAlignFunction(Function):

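A variant of this guard, shown here only as a sketch and not part of the commit, binds the name to None on failure so code paths that need the CUDA kernels can check availability and fail with a clear message instead of a later NameError:

try:
    from . import roi_align_cuda
except ImportError:
    roi_align_cuda = None  # CPU-only build: the CUDA extension was not compiled

# A caller could then guard explicitly (hypothetical usage):
# if roi_align_cuda is None:
#     raise RuntimeError("RoIAlign needs the roi_align_cuda extension; rebuild with CUDA")
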
6 changes: 5 additions & 1 deletion detector/nms/nms_wrapper.py
@@ -1,7 +1,11 @@
 import numpy as np
 import torch

-from . import nms_cpu, nms_cuda
+try:
+    from . import nms_cpu, nms_cuda
+except ImportError:
+    print("Import nms_cpu and nms_cuda failed, importing only nms_cpu...")
+    from . import nms_cpu
 from .soft_nms_cpu import soft_nms_cpu

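One side note on this fallback (an observation about the diff, not a change it makes): when the CUDA extension is absent, the name nms_cuda is never bound, so any GPU code path that later reaches it raises a NameError rather than a descriptive error. A slightly more defensive sketch of the same guard:

try:
    from . import nms_cpu, nms_cuda
except ImportError:
    print("Import nms_cpu and nms_cuda failed, importing only nms_cpu...")
    from . import nms_cpu
    nms_cuda = None  # lets callers test for None and report the CPU-only build clearly
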
60 changes: 36 additions & 24 deletions setup.py
@@ -6,7 +6,8 @@
 import numpy as np
 from Cython.Build import cythonize
 from setuptools import Extension, find_packages, setup
-from torch.utils.cpp_extension import BuildExtension, CUDAExtension
+from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CppExtension
+import torch

 MAJOR = 0
 MINOR = 5
@@ -116,6 +117,14 @@ def make_cuda_ext(name, module, sources):
         })


+def make_cpp_ext(name, module, sources):
+
+    return CppExtension(
+        name='{}.{}'.format(module, name),
+        sources=[os.path.join(*module.split('.'), p) for p in sources]
+    )
+
+
 def get_ext_modules():
     ext_modules = []
     # only windows visual studio 2013+ support compile c/cuda extensions

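For context (not part of the diff): CppExtension is the CUDA-free counterpart of CUDAExtension in torch.utils.cpp_extension; it compiles plain C++ sources against the PyTorch headers without needing nvcc, which is what lets nms_cpu build on a machine without the CUDA toolkit. A hypothetical standalone call, for illustration only:

from torch.utils.cpp_extension import CppExtension

# Hypothetical: compile detector/nms/src/nms_cpu.cpp into detector.nms.nms_cpu
ext = CppExtension(
    name='detector.nms.nms_cpu',
    sources=['detector/nms/src/nms_cpu.cpp'],
)
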
@@ -128,33 +137,36 @@ def get_ext_modules():
                 name='soft_nms_cpu',
                 module='detector.nms',
                 sources=['src/soft_nms_cpu.pyx']),
-            make_cuda_ext(
+            make_cpp_ext(
                 name='nms_cpu',
                 module='detector.nms',
                 sources=['src/nms_cpu.cpp']),
-            make_cuda_ext(
-                name='nms_cuda',
-                module='detector.nms',
-                sources=['src/nms_cuda.cpp', 'src/nms_kernel.cu']),
-            make_cuda_ext(
-                name='roi_align_cuda',
-                module='alphapose.utils.roi_align',
-                sources=['src/roi_align_cuda.cpp', 'src/roi_align_kernel.cu']),
-            make_cuda_ext(
-                name='deform_conv_cuda',
-                module='alphapose.models.layers.dcn',
-                sources=[
-                    'src/deform_conv_cuda.cpp',
-                    'src/deform_conv_cuda_kernel.cu'
-                ]),
-            make_cuda_ext(
-                name='deform_pool_cuda',
-                module='alphapose.models.layers.dcn',
-                sources=[
-                    'src/deform_pool_cuda.cpp',
-                    'src/deform_pool_cuda_kernel.cu'
-                ]),
         ]
+        if torch.cuda.is_available(): # Skip cuda extensions if no GPU available
+            ext_modules.extend([
+                make_cuda_ext(
+                    name='nms_cuda',
+                    module='detector.nms',
+                    sources=['src/nms_cuda.cpp', 'src/nms_kernel.cu']),
+                make_cuda_ext(
+                    name='roi_align_cuda',
+                    module='alphapose.utils.roi_align',
+                    sources=['src/roi_align_cuda.cpp', 'src/roi_align_kernel.cu']),
+                make_cuda_ext(
+                    name='deform_conv_cuda',
+                    module='alphapose.models.layers.dcn',
+                    sources=[
+                        'src/deform_conv_cuda.cpp',
+                        'src/deform_conv_cuda_kernel.cu'
+                    ]),
+                make_cuda_ext(
+                    name='deform_pool_cuda',
+                    module='alphapose.models.layers.dcn',
+                    sources=[
+                        'src/deform_pool_cuda.cpp',
+                        'src/deform_pool_cuda_kernel.cu'
+                    ]),
+            ])
     return ext_modules

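A note on the guard chosen here (an observation, not part of the PR): torch.cuda.is_available() reports whether a usable GPU is visible at build time, whereas compiling the .cu sources actually depends on the CUDA toolkit (nvcc) being installed. Some projects therefore gate the CUDA extensions on torch.utils.cpp_extension.CUDA_HOME instead; a minimal sketch of that variant, assuming the same make_cuda_ext entries as above:

from torch.utils.cpp_extension import CUDA_HOME

def cuda_toolkit_available():
    # CUDA_HOME is None when PyTorch cannot locate a CUDA toolkit (nvcc),
    # which is what actually determines whether the .cu sources can be built,
    # independent of whether a GPU happens to be visible right now.
    return CUDA_HOME is not None

# in get_ext_modules(), the guard would become:
# if cuda_toolkit_available():
#     ext_modules.extend([...])  # same make_cuda_ext entries as in the diff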