
Formatting
sbaldu committed Dec 14, 2023
1 parent eb104c4 commit 2a3cd21
Showing 6 changed files with 94 additions and 97 deletions.
12 changes: 6 additions & 6 deletions CLUEstering/alpaka/BindingModules/binding_cpu.cc
@@ -21,7 +21,7 @@ namespace alpaka_serial_sync {
const std::vector<float>& weights,
const FlatKernel& kernel,
int Ndim,
- size_t block_size = 1024) {
+ size_t block_size = 1024) {
auto const dev_acc = alpaka::getDevByIdx<Acc1D>(0u);

// Create the queue
@@ -83,7 +83,7 @@ namespace alpaka_serial_sync {
const std::vector<float>& weights,
const ExponentialKernel& kernel,
int Ndim,
- size_t block_size = 1024) {
+ size_t block_size = 1024) {
auto const dev_acc = alpaka::getDevByIdx<Acc1D>(0u);

// Create the queue
@@ -145,7 +145,7 @@ namespace alpaka_serial_sync {
const std::vector<float>& weights,
const GaussianKernel& kernel,
int Ndim,
- size_t block_size) {
+ size_t block_size) {
auto const dev_acc = alpaka::getDevByIdx<Acc1D>(0u);

// Create the queue
@@ -211,7 +211,7 @@ namespace alpaka_serial_sync {
const std::vector<float>&,
const FlatKernel&,
int,
- size_t>(&mainRun),
+ size_t>(&mainRun),
"mainRun");
m.def("mainRun",
pybind11::overload_cast<float,
@@ -222,7 +222,7 @@ namespace alpaka_serial_sync {
const std::vector<float>&,
const ExponentialKernel&,
int,
- size_t>(&mainRun),
+ size_t>(&mainRun),
"mainRun");
m.def("mainRun",
pybind11::overload_cast<float,
@@ -233,7 +233,7 @@ namespace alpaka_serial_sync {
const std::vector<float>&,
const GaussianKernel&,
int,
- size_t>(&mainRun),
+ size_t>(&mainRun),
"mainRun");
}
}; // namespace alpaka_serial_sync
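For context, a minimal, self-contained sketch of the pybind11::overload_cast pattern used by the mainRun bindings in this file: it picks one C++ overload of a free function when several share the same name, which is why each binding above spells out the full argument list. The scale functions and the module name example_overloads are invented here for illustration only and are not part of CLUEstering (assumes pybind11 with C++14 or later).

// Sketch only: overload_cast<Args...> selects the overload whose parameter list matches exactly.
#include <pybind11/pybind11.h>

int scale(int x, int factor) { return x * factor; }
float scale(float x, float factor) { return x * factor; }

PYBIND11_MODULE(example_overloads, m) {
  m.def("scale", pybind11::overload_cast<int, int>(&scale), "integer overload");
  m.def("scale", pybind11::overload_cast<float, float>(&scale), "single-precision overload");
}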
20 changes: 10 additions & 10 deletions CLUEstering/alpaka/BindingModules/binding_cpu_tbb.cc
@@ -19,9 +19,9 @@ namespace alpaka_tbb_async {
int pPBin,
const std::vector<std::vector<float>>& coords,
const std::vector<float>& weights,
- const FlatKernel& kernel,
+ const FlatKernel& kernel,
int Ndim,
- size_t block_size) {
+ size_t block_size) {
auto const dev_acc = alpaka::getDevByIdx<Acc1D>(0u);

// Create the queue
@@ -81,9 +81,9 @@ namespace alpaka_tbb_async {
int pPBin,
const std::vector<std::vector<float>>& coords,
const std::vector<float>& weights,
- const ExponentialKernel& kernel,
+ const ExponentialKernel& kernel,
int Ndim,
- size_t block_size) {
+ size_t block_size) {
auto const dev_acc = alpaka::getDevByIdx<Acc1D>(0u);

// Create the queue
@@ -143,9 +143,9 @@ namespace alpaka_tbb_async {
int pPBin,
const std::vector<std::vector<float>>& coords,
const std::vector<float>& weights,
- const GaussianKernel& kernel,
+ const GaussianKernel& kernel,
int Ndim,
- size_t block_size) {
+ size_t block_size) {
auto const dev_acc = alpaka::getDevByIdx<Acc1D>(0u);

// Create the queue
@@ -203,7 +203,7 @@ namespace alpaka_tbb_async {
m.doc() = "Binding of the CLUE algorithm running on CPU with TBB";

/* pybind11::class_<ConvolutionalKernel>(m, "ConvolutionalKernel").def(pybind11::init<>()) */
- /* .def("operator()", &ConvolutionalKernel::operator()); */
+ /* .def("operator()", &ConvolutionalKernel::operator()); */
/* pybind11::class_<FlatKernel, ConvolutionalKernel>(m, "FlatKernel") */
/* .def(pybind11::init<float>()) */
/* .def("operator()", &FlatKernel::operator()); */
@@ -225,7 +225,7 @@ namespace alpaka_tbb_async {
const std::vector<float>&,
const FlatKernel&,
int,
- size_t>(&mainRun),
+ size_t>(&mainRun),
"mainRun");
m.def("mainRun",
pybind11::overload_cast<float,
@@ -236,7 +236,7 @@ namespace alpaka_tbb_async {
const std::vector<float>&,
const ExponentialKernel&,
int,
- size_t>(&mainRun),
+ size_t>(&mainRun),
"mainRun");
m.def("mainRun",
pybind11::overload_cast<float,
@@ -247,7 +247,7 @@ namespace alpaka_tbb_async {
const std::vector<float>&,
const GaussianKernel&,
int,
- size_t>(&mainRun),
+ size_t>(&mainRun),
"mainRun");

/* m.def("mainRun", &mainRun, "mainRun"); */
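The commented-out lines above hint at an inheritance relation between the kernels and a ConvolutionalKernel base class. Below is a minimal, hypothetical sketch of the pybind11::class_<Derived, Base> pattern those lines would use if re-enabled; the Shape and Circle types and the module name example_hierarchy are invented purely for illustration and say nothing about the actual kernel classes.

// Sketch only: bind the base first, then name it as the second template argument of the
// derived binding so Python sees the inheritance and reuses the base's bound methods.
#include <pybind11/pybind11.h>

struct Shape {
  virtual ~Shape() = default;
  virtual float area() const { return 0.f; }
};

struct Circle : Shape {
  explicit Circle(float r) : radius(r) {}
  float area() const override { return 3.14159265f * radius * radius; }
  float radius;
};

PYBIND11_MODULE(example_hierarchy, m) {
  pybind11::class_<Shape>(m, "Shape").def("area", &Shape::area);
  pybind11::class_<Circle, Shape>(m, "Circle").def(pybind11::init<float>());
}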
18 changes: 9 additions & 9 deletions CLUEstering/alpaka/BindingModules/binding_gpu_cuda.cc
@@ -23,12 +23,12 @@ namespace alpaka_cuda_async {
const std::vector<float>& weights,
const FlatKernel& kernel,
int Ndim,
- size_t block_size) {
- std::vector<Device> devices = alpaka::getDevs<Platform>();
+ size_t block_size) {
+ std::vector<Device> devices = alpaka::getDevs<Platform>();

auto const dev_acc = alpaka::getDevByIdx<Acc1D>(0u);

- /* initialise<Platform>(); */
+ /* initialise<Platform>(); */

// Create the queue
Queue queue_(dev_acc);
@@ -89,7 +89,7 @@ namespace alpaka_cuda_async {
const std::vector<float>& weights,
const ExponentialKernel& kernel,
int Ndim,
- size_t block_size) {
+ size_t block_size) {
auto const dev_acc = alpaka::getDevByIdx<Acc1D>(0u);

// Create the queue
@@ -151,7 +151,7 @@ namespace alpaka_cuda_async {
const std::vector<float>& weights,
const GaussianKernel& kernel,
int Ndim,
- size_t block_size) {
+ size_t block_size) {
auto const dev_acc = alpaka::getDevByIdx<Acc1D>(0u);

// Create the queue
@@ -217,7 +217,7 @@ namespace alpaka_cuda_async {
const std::vector<float>&,
const FlatKernel&,
int,
- size_t>(&mainRun),
+ size_t>(&mainRun),
"mainRun");
m.def("mainRun",
pybind11::overload_cast<float,
@@ -228,7 +228,7 @@ namespace alpaka_cuda_async {
const std::vector<float>&,
const ExponentialKernel&,
int,
- size_t>(&mainRun),
+ size_t>(&mainRun),
"mainRun");
m.def("mainRun",
pybind11::overload_cast<float,
@@ -239,7 +239,7 @@ namespace alpaka_cuda_async {
const std::vector<float>&,
const GaussianKernel&,
int,
- size_t>(&mainRun),
+ size_t>(&mainRun),
"mainRun");
}
-}; // namespace alpaka_tbb_async
+}; // namespace alpaka_cuda_async
9 changes: 3 additions & 6 deletions CLUEstering/alpaka/BindingModules/binding_kernels.cc
@@ -11,10 +11,7 @@
PYBIND11_MODULE(CLUE_Convolutional_Kernels, m) {
m.doc() = "Binding of the convolutional kernels used in the CLUE algorithm.";

- pybind11::class_<FlatKernel>(m, "FlatKernel")
-     .def(pybind11::init<float>());
- pybind11::class_<ExponentialKernel>(m, "ExponentialKernel")
-     .def(pybind11::init<float, float>());
- pybind11::class_<GaussianKernel>(m, "GaussianKernel")
-     .def(pybind11::init<float, float, float>());
+ pybind11::class_<FlatKernel>(m, "FlatKernel").def(pybind11::init<float>());
+ pybind11::class_<ExponentialKernel>(m, "ExponentialKernel").def(pybind11::init<float, float>());
+ pybind11::class_<GaussianKernel>(m, "GaussianKernel").def(pybind11::init<float, float, float>());
}
6 changes: 3 additions & 3 deletions CLUEstering/alpaka/CLUE/CLUEAlgoAlpaka.h
@@ -42,7 +42,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE {
PointsAlpaka<Ndim>& d_points,
const KernelType& kernel,
Queue queue_,
- size_t block_size);
+ size_t block_size);

private:
float dc_;
@@ -105,7 +105,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE {
void CLUEAlgoAlpaka<TAcc, Ndim>::setup(const Points<Ndim>& h_points,
PointsAlpaka<Ndim>& d_points,
Queue queue_,
- size_t block_size) {
+ size_t block_size) {
// Create temporary tiles object
TilesAlpaka<Ndim> temp;
calculate_tile_size(temp, h_points);
@@ -134,7 +134,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE {
PointsAlpaka<Ndim>& d_points,
const KernelType& kernel,
Queue queue_,
- size_t block_size) {
+ size_t block_size) {
setup(h_points, d_points, queue_, block_size);

const Idx grid_size = cms::alpakatools::divide_up_by(h_points.n, block_size);
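The make_clusters hunk above sizes the kernel launch with cms::alpakatools::divide_up_by(h_points.n, block_size). A small standalone sketch of that arithmetic follows, assuming divide_up_by is a plain ceiling division (the real helper lives in the CMS alpakatools headers and is not part of this commit); the point count used below is hypothetical.

// Sketch only: ceiling division used to size a 1D grid so every point gets a thread.
#include <cstddef>
#include <iostream>

constexpr std::size_t divide_up_by(std::size_t value, std::size_t divisor) {
  return (value + divisor - 1) / divisor;  // round up so the last partial block is included
}

int main() {
  const std::size_t n_points = 2500;    // hypothetical number of input points
  const std::size_t block_size = 1024;  // default used by the serial binding above
  std::cout << divide_up_by(n_points, block_size) << " blocks\n";  // prints "3 blocks"
  return 0;
}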