Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix workflows post alpaka merge #16

Merged
merged 34 commits into from
Mar 4, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
879bfdf
Write CMake file for building
sbaldu Jan 18, 2024
cbaab19
Update `setup.py` to use CMake when building the wheel
sbaldu Jan 18, 2024
a0f84cb
Only fetch boost if not found
sbaldu Jan 19, 2024
bd3f6cf
Fix conditional compilation of CUDA
sbaldu Jan 19, 2024
a9fdab3
Add compilation for hip in cmake
sbaldu Jan 19, 2024
bd6d366
Substitute old `CLUEstering.py` file
sbaldu Jan 23, 2024
1176bec
Fix paths of modules when installing the library
sbaldu Jan 23, 2024
b39ae0f
Change extension of cuda binding file to `.cu`
sbaldu Jan 23, 2024
d26a664
Don't use `.cu` files
sbaldu Jan 24, 2024
9aa5355
Small tweaks to CMake file and formatting
sbaldu Jan 24, 2024
479b455
Fix typos
sbaldu Jan 24, 2024
3178d40
Remove pybind11 from toml
sbaldu Jan 26, 2024
3aef2d7
Fix packaging and installation
sbaldu Jan 26, 2024
f84c3fe
Link shared object to `CLUEstering/lib` folder
sbaldu Jan 29, 2024
0f38c8b
Delete copy of `CLUEstering.py`
sbaldu Jan 31, 2024
d77bead
Cleaner check for existence of modules
sbaldu Feb 5, 2024
752f38b
Remove flake linting from workflows
sbaldu Jan 29, 2024
71d6b17
Update path in clang-format workflow
sbaldu Jan 31, 2024
c232ceb
Formatting
sbaldu Jan 31, 2024
808e3bd
Remove unneeded pip packages and compile clue instead of install
sbaldu Jan 31, 2024
f661f79
Set `submodules: true` in python test workflow
sbaldu Jan 31, 2024
3622d3c
Change compilation command
sbaldu Jan 31, 2024
01cd530
Remove test files for periodic coordinates (to re-add in the future)
sbaldu Jan 31, 2024
55fad9a
Delete old serial tests
sbaldu Feb 5, 2024
0603b54
Delete old workflow
sbaldu Feb 5, 2024
22b31f8
Fix missing link when building
sbaldu Feb 5, 2024
16ddc35
Update check of output with truth files
sbaldu Feb 5, 2024
268608c
Fix test of `test_blobs`
sbaldu Feb 5, 2024
d84e4ad
Fix `_read_array`
sbaldu Feb 6, 2024
d2391ad
Fix compilation in conda workflow
sbaldu Feb 6, 2024
1b4d568
Sync submodules for conda
sbaldu Feb 6, 2024
f8e8427
Formatting
sbaldu Feb 6, 2024
56c490a
Merge branch 'main' into fix_workflows_post_alpaka
sbaldu Feb 18, 2024
f136c5e
Fix typo in check for modules
sbaldu Feb 19, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/clang_format.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ jobs:
strategy:
matrix:
path:
- 'CLUEstering/include'
- 'CLUEstering/alpaka'
steps:
- uses: actions/checkout@v3
- name: Run clang-format style check
Expand Down
29 changes: 0 additions & 29 deletions .github/workflows/make.yml

This file was deleted.

14 changes: 5 additions & 9 deletions .github/workflows/python-package-anaconda.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@ jobs:

steps:
- uses: actions/checkout@v3
with:
submodules: true
- name: Set up Python 3.11
uses: actions/setup-python@v3
with:
Expand All @@ -26,17 +28,11 @@ jobs:
- name: Install dependencies
run: |
conda env update --file ${{github.workspace}}/.env/test_env.yml --name base
# - name: Lint with flake8
# run: |
# conda install flake8
# # stop the build if there are Python syntax errors or undefined names
# flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
# flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Install locally
- name: Compile CLUEstering modules
working-directory: ${{github.workspace}}
run: |
pip install .
cmake -S . -B build -DCMAKE_BUILD_TYPE=Release && cmake --build build
cmake -B build
- name: Test with pytest
working-directory: ${{github.workspace}}/tests
run: |
Expand Down
16 changes: 6 additions & 10 deletions .github/workflows/python-package.yml
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@ jobs:
steps:
# Checks out the code in the repository
- uses: actions/checkout@v3
with:
submodules: true
# Sets up Python on the machine with the right version
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v3
Expand All @@ -28,21 +30,15 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install flake8 pytest
python -m pip install pytest
python -m pip install scikit-learn
python -m pip install pandas
python -m pip install matplotlib
python -m pip install pybind11
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Install CLUEstering
- name: Compile CLUEstering modules
working-directory: ${{github.workspace}}
run: |
pip install .
cmake -S . -B build -DCMAKE_BUILD_TYPE=Release && cmake --build build
cmake -B build
- name: Run tests of the Python library
working-directory: ${{github.workspace}}/tests
run: |
Expand Down
46 changes: 31 additions & 15 deletions CLUEstering/CLUEstering.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
Density based clustering algorithm developed at CERN.
"""

import sys
from dataclasses import dataclass
from glob import glob
import random as rnd
Expand All @@ -16,17 +17,16 @@
from sklearn.preprocessing import StandardScaler
from os.path import dirname, exists, join
path = dirname(__file__)
import sys
sys.path.insert(1, join(path, 'lib'))
import CLUE_Convolutional_Kernels as clue_kernels
import CLUE_CPU_Serial as cpu_serial
tbb_found = exists(*glob(join(path, 'lib/CLUE_CPU_TBB*.so')))
tbb_found = exists(str(*glob(join(path, 'lib/CLUE_CPU_TBB*.so'))))
if tbb_found:
import CLUE_CPU_TBB as cpu_tbb
cuda_found = exists(*glob(join(path, 'lib/CLUE_CPU_TBB*.so')))
cuda_found = exists(str(*glob(join(path, 'lib/CLUE_CPU_CUDA*.so'))))
if cuda_found:
import CLUE_GPU_CUDA as gpu_cuda
hip_found = exists(*glob(join(path, 'lib/CLUE_CPU_TBB*.so')))
hip_found = exists(str(*glob(join(path, 'lib/CLUE_CPU_HIP*.so'))))
if hip_found:
import CLUE_GPU_HIP as gpu_hip

Expand Down Expand Up @@ -236,14 +236,26 @@ def _read_array(self, input_data: Union[list,np.ndarray]) -> None:
None
"""

if len(input_data) < 2 or len(input_data) > 10:
raise ValueError("Inadequate data. The data must contain at least one coordinate" +
" and the weight.")
self.clust_data = clustering_data(np.asarray(input_data[:-1]),
np.copy(np.asarray(input_data[:-1])),
np.asarray(input_data[-1]),
len(input_data[:-1]),
len(input_data[-1]))
# [[x0, x1, x2, ...], [y0, y1, y2, ...], ... , [weights]]
if isinstance(input_data[0][0], (int, float)):
if len(input_data) < 2 or len(input_data) > 11:
raise ValueError("Inadequate data. The supported dimensions are between" +
"1 and 10.")
self.clust_data = clustering_data(np.asarray(input_data[:-1]).T,
np.copy(np.asarray(input_data[:-1]).T),
np.asarray(input_data[-1]),
len(input_data[:-1]),
len(input_data[-1]))
# [[[x0, y0, z0, ...], [x1, y1, z1, ...], ...], [weights]]
else:
if len(input_data) != 2:
raise ValueError("Inadequate data. The data must contain a weight value" +
"for each point.")
self.clust_data = clustering_data(np.asarray(input_data[0]),
np.copy(np.asarray(input_data[0])),
np.asarray(input_data[-1]),
len(input_data[0][0]),
len(input_data[-1]))

def _read_string(self, input_data: str) -> Union[pd.DataFrame,None]:
"""
Expand Down Expand Up @@ -346,7 +358,8 @@ def _rescale(self) -> None:

for dim in range(self.clust_data.n_dim):
self.clust_data.coords.T[dim] = \
self.scaler.fit_transform(self.clust_data.coords.T[dim].reshape(-1, 1)).reshape(1, -1)[0]
self.scaler.fit_transform(
self.clust_data.coords.T[dim].reshape(-1, 1)).reshape(1, -1)[0]

def read_data(self,
input_data: Union[pd.DataFrame,str,dict,list,np.ndarray]) -> None:
Expand Down Expand Up @@ -474,12 +487,15 @@ def choose_kernel(self,
if len(parameters) != 2:
raise ValueError("Wrong number of parameters. The exponential"
+ " kernel requires 2 parameters.")
self.kernel = CLUE_Convolutional_Kernels.ExponentialKernel(parameters[0], parameters[1])
self.kernel = CLUE_Convolutional_Kernels.ExponentialKernel(parameters[0],
parameters[1])
elif choice == "gaus":
if len(parameters) != 3:
raise ValueError("Wrong number of parameters. The gaussian" +
" kernel requires 3 parameters.")
self.kernel = CLUE_Convolutional_Kernels.GaussinKernel(parameters[0], parameters[1], parameters[2])
self.kernel = CLUE_Convolutional_Kernels.GaussinKernel(parameters[0],
parameters[1],
parameters[2])
elif choice == "custom":
if len(parameters) != 0:
raise ValueError("Wrong number of parameters. Custom kernels"
Expand Down
37 changes: 27 additions & 10 deletions CLUEstering/alpaka/AlpakaCore/CachedBufAlloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,16 +11,24 @@ namespace cms::alpakatools {
namespace traits {

//! The caching memory allocator trait.
template <typename TElem, typename TDim, typename TIdx, typename TDev, typename TQueue, typename TSfinae = void>
template <typename TElem,
typename TDim,
typename TIdx,
typename TDev,
typename TQueue,
typename TSfinae = void>
struct CachedBufAlloc {
static_assert(alpaka::meta::DependentFalseType<TDev>::value, "This device does not support a caching allocator");
static_assert(alpaka::meta::DependentFalseType<TDev>::value,
"This device does not support a caching allocator");
};

//! The caching memory allocator implementation for the CPU device
template <typename TElem, typename TDim, typename TIdx, typename TQueue>
struct CachedBufAlloc<TElem, TDim, TIdx, alpaka::DevCpu, TQueue, void> {
template <typename TExtent>
ALPAKA_FN_HOST static auto allocCachedBuf(alpaka::DevCpu const& dev, TQueue queue, TExtent const& extent)
ALPAKA_FN_HOST static auto allocCachedBuf(alpaka::DevCpu const& dev,
TQueue queue,
TExtent const& extent)
-> alpaka::BufCpu<TElem, TDim, TIdx> {
// non-cached host-only memory
return alpaka::allocAsyncBuf<TElem, TIdx>(queue, extent);
Expand All @@ -35,7 +43,8 @@ namespace cms::alpakatools {
template <typename TExtent>
ALPAKA_FN_HOST static auto allocCachedBuf(alpaka::DevCpu const& dev,
alpaka::QueueCudaRtNonBlocking queue,
TExtent const& extent) -> alpaka::BufCpu<TElem, TDim, TIdx> {
TExtent const& extent)
-> alpaka::BufCpu<TElem, TDim, TIdx> {
ALPAKA_DEBUG_MINIMAL_LOG_SCOPE;

auto& allocator = getHostCachingAllocator<alpaka::QueueCudaRtNonBlocking>();
Expand All @@ -48,15 +57,18 @@ namespace cms::alpakatools {
// use a custom deleter to return the buffer to the CachingAllocator
auto deleter = [alloc = &allocator](TElem* ptr) { alloc->free(ptr); };

return alpaka::BufCpu<TElem, TDim, TIdx>(dev, reinterpret_cast<TElem*>(memPtr), std::move(deleter), extent);
return alpaka::BufCpu<TElem, TDim, TIdx>(
dev, reinterpret_cast<TElem*>(memPtr), std::move(deleter), extent);
}
};

//! The caching memory allocator implementation for the CUDA device
template <typename TElem, typename TDim, typename TIdx, typename TQueue>
struct CachedBufAlloc<TElem, TDim, TIdx, alpaka::DevCudaRt, TQueue, void> {
template <typename TExtent>
ALPAKA_FN_HOST static auto allocCachedBuf(alpaka::DevCudaRt const& dev, TQueue queue, TExtent const& extent)
ALPAKA_FN_HOST static auto allocCachedBuf(alpaka::DevCudaRt const& dev,
TQueue queue,
TExtent const& extent)
-> alpaka::BufCudaRt<TElem, TDim, TIdx> {
ALPAKA_DEBUG_MINIMAL_LOG_SCOPE;

Expand Down Expand Up @@ -88,7 +100,8 @@ namespace cms::alpakatools {
template <typename TExtent>
ALPAKA_FN_HOST static auto allocCachedBuf(alpaka::DevCpu const& dev,
alpaka::QueueHipRtNonBlocking queue,
TExtent const& extent) -> alpaka::BufCpu<TElem, TDim, TIdx> {
TExtent const& extent)
-> alpaka::BufCpu<TElem, TDim, TIdx> {
ALPAKA_DEBUG_MINIMAL_LOG_SCOPE;

auto& allocator = getHostCachingAllocator<alpaka::QueueHipRtNonBlocking>();
Expand All @@ -101,15 +114,18 @@ namespace cms::alpakatools {
// use a custom deleter to return the buffer to the CachingAllocator
auto deleter = [alloc = &allocator](TElem* ptr) { alloc->free(ptr); };

return alpaka::BufCpu<TElem, TDim, TIdx>(dev, reinterpret_cast<TElem*>(memPtr), std::move(deleter), extent);
return alpaka::BufCpu<TElem, TDim, TIdx>(
dev, reinterpret_cast<TElem*>(memPtr), std::move(deleter), extent);
}
};

//! The caching memory allocator implementation for the ROCm/HIP device
template <typename TElem, typename TDim, typename TIdx, typename TQueue>
struct CachedBufAlloc<TElem, TDim, TIdx, alpaka::DevHipRt, TQueue, void> {
template <typename TExtent>
ALPAKA_FN_HOST static auto allocCachedBuf(alpaka::DevHipRt const& dev, TQueue queue, TExtent const& extent)
ALPAKA_FN_HOST static auto allocCachedBuf(alpaka::DevHipRt const& dev,
TQueue queue,
TExtent const& extent)
-> alpaka::BufHipRt<TElem, TDim, TIdx> {
ALPAKA_DEBUG_MINIMAL_LOG_SCOPE;

Expand Down Expand Up @@ -137,7 +153,8 @@ namespace cms::alpakatools {

template <typename TElem, typename TIdx, typename TExtent, typename TQueue, typename TDev>
ALPAKA_FN_HOST auto allocCachedBuf(TDev const& dev, TQueue queue, TExtent const& extent = TExtent()) {
return traits::CachedBufAlloc<TElem, alpaka::Dim<TExtent>, TIdx, TDev, TQueue>::allocCachedBuf(dev, queue, extent);
return traits::CachedBufAlloc<TElem, alpaka::Dim<TExtent>, TIdx, TDev, TQueue>::allocCachedBuf(
dev, queue, extent);
}

} // namespace cms::alpakatools
Expand Down
Loading
Loading