Add compute_mapping_indices used by shared memory groupby #17147

Merged
merged 4 commits on Oct 28, 2024
2 changes: 2 additions & 0 deletions cpp/CMakeLists.txt
@@ -369,6 +369,8 @@ add_library(
src/filling/sequence.cu
src/groupby/groupby.cu
src/groupby/hash/compute_groupby.cu
src/groupby/hash/compute_mapping_indices.cu
src/groupby/hash/compute_mapping_indices_null.cu
src/groupby/hash/compute_single_pass_aggs.cu
src/groupby/hash/create_sparse_results_table.cu
src/groupby/hash/flatten_single_pass_aggs.cpp
35 changes: 35 additions & 0 deletions cpp/src/groupby/hash/compute_mapping_indices.cu
@@ -0,0 +1,35 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "compute_mapping_indices.cuh"
#include "compute_mapping_indices.hpp"

namespace cudf::groupby::detail::hash {
template cudf::size_type max_occupancy_grid_size<hash_set_ref_t<cuco::insert_and_find_tag>>(
  cudf::size_type n);

template void compute_mapping_indices<hash_set_ref_t<cuco::insert_and_find_tag>>(
  cudf::size_type grid_size,
  cudf::size_type num,
  hash_set_ref_t<cuco::insert_and_find_tag> global_set,
  bitmask_type const* row_bitmask,
  bool skip_rows_with_nulls,
  cudf::size_type* local_mapping_index,
  cudf::size_type* global_mapping_index,
  cudf::size_type* block_cardinality,
  cuda::std::atomic_flag* needs_global_memory_fallback,
  rmm::cuda_stream_view stream);
} // namespace cudf::groupby::detail::hash
192 changes: 192 additions & 0 deletions cpp/src/groupby/hash/compute_mapping_indices.cuh
@@ -0,0 +1,192 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

#include "compute_mapping_indices.hpp"
#include "helpers.cuh"

#include <cudf/detail/cuco_helpers.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/cuda.hpp>
#include <cudf/types.hpp>

#include <rmm/cuda_stream_view.hpp>

#include <cooperative_groups.h>
#include <cuco/static_set_ref.cuh>
#include <cuda/std/atomic>
#include <cuda/std/utility>

#include <algorithm>

namespace cudf::groupby::detail::hash {
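/**
 * @brief Inserts the row at `idx` into the block-local shared memory hash set and records the
 * row's block-wise rank (its index within the shared set) in `local_mapping_index`; the first
 * occurrence of each key also increments the block's `cardinality` counter and stores its row
 * index in `shared_set_indices`
 */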
template <typename SetType>
__device__ void find_local_mapping(cooperative_groups::thread_block const& block,
                                   cudf::size_type idx,
                                   cudf::size_type num_input_rows,
                                   SetType shared_set,
                                   bitmask_type const* row_bitmask,
                                   bool skip_rows_with_nulls,
                                   cudf::size_type* cardinality,
                                   cudf::size_type* local_mapping_index,
                                   cudf::size_type* shared_set_indices)
{
  auto const is_valid_input =
    idx < num_input_rows and (not skip_rows_with_nulls or cudf::bit_is_set(row_bitmask, idx));
  auto const [result_idx, inserted] = [&]() {
    if (is_valid_input) {
      auto const result      = shared_set.insert_and_find(idx);
      auto const matched_idx = *result.first;
      auto const inserted    = result.second;
      // inserted a new element
      if (inserted) {
        auto const shared_set_index          = atomicAdd(cardinality, 1);
        shared_set_indices[shared_set_index] = idx;
        local_mapping_index[idx]             = shared_set_index;
      }
      return cuda::std::pair{matched_idx, inserted};
    }
    return cuda::std::pair{0, false};  // dummy values
  }();
  // Syncing the thread block is needed so that updates in `local_mapping_index` are visible to all
  // threads in the thread block.
  block.sync();
  if (is_valid_input) {
    // element was already in set
    if (!inserted) { local_mapping_index[idx] = local_mapping_index[result_idx]; }
  }
}

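/**
 * @brief Inserts the unique keys collected by a thread block into `global_set` and records their
 * positions in the global hash set in `global_mapping_index`
 */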
template <typename SetRef>
__device__ void find_global_mapping(cooperative_groups::thread_block const& block,
                                    cudf::size_type cardinality,
                                    SetRef global_set,
                                    cudf::size_type* shared_set_indices,
                                    cudf::size_type* global_mapping_index)
{
  // For each unique key found in the shared memory hash set, insert it into the global hash set
  // and store the index of its match in `global_mapping_index`
  for (auto idx = block.thread_rank(); idx < cardinality; idx += block.num_threads()) {
    auto const input_idx = shared_set_indices[idx];
    global_mapping_index[block.group_index().x * GROUPBY_SHM_MAX_ELEMENTS + idx] =
      *global_set.insert_and_find(input_idx).first;
  }
}

/**
 * @brief Inserts keys into the shared memory hash set and stores the block-wise rank of a given
 * row index in `local_mapping_index`. If the number of unique keys found in a thread block exceeds
 * `GROUPBY_CARDINALITY_THRESHOLD`, the threads in that block exit without updating `global_set` or
 * setting `global_mapping_index`. Otherwise, the unique keys found are inserted into the global
 * hash set, and the row indices of the global sparse table are saved in `global_mapping_index`.
 */
template <class SetRef>
CUDF_KERNEL void mapping_indices_kernel(cudf::size_type num_input_rows,
                                        SetRef global_set,
                                        bitmask_type const* row_bitmask,
                                        bool skip_rows_with_nulls,
                                        cudf::size_type* local_mapping_index,
                                        cudf::size_type* global_mapping_index,
                                        cudf::size_type* block_cardinality,
                                        cuda::std::atomic_flag* needs_global_memory_fallback)
{
  __shared__ cudf::size_type shared_set_indices[GROUPBY_SHM_MAX_ELEMENTS];

  // Shared set initialization
  __shared__ cuco::window<cudf::size_type, GROUPBY_WINDOW_SIZE> windows[window_extent.value()];

  auto raw_set = cuco::static_set_ref{
    cuco::empty_key<cudf::size_type>{cudf::detail::CUDF_SIZE_TYPE_SENTINEL},
    global_set.key_eq(),
    probing_scheme_t{global_set.hash_function()},
    cuco::thread_scope_block,
    cuco::aow_storage_ref<cudf::size_type, GROUPBY_WINDOW_SIZE, decltype(window_extent)>{
      window_extent, windows}};
  auto shared_set = raw_set.rebind_operators(cuco::insert_and_find);

  auto const block = cooperative_groups::this_thread_block();
  shared_set.initialize(block);

  __shared__ cudf::size_type cardinality;
  if (block.thread_rank() == 0) { cardinality = 0; }
  block.sync();

  auto const stride = cudf::detail::grid_1d::grid_stride();

  for (auto idx = cudf::detail::grid_1d::global_thread_id();
       idx - block.thread_rank() < num_input_rows;
       idx += stride) {
    find_local_mapping(block,
                       idx,
                       num_input_rows,
                       shared_set,
                       row_bitmask,
                       skip_rows_with_nulls,
                       &cardinality,
                       local_mapping_index,
                       shared_set_indices);

    block.sync();

    if (cardinality >= GROUPBY_CARDINALITY_THRESHOLD) {
      if (block.thread_rank() == 0) { needs_global_memory_fallback->test_and_set(); }
      break;
    }
  }

  // Insert unique keys from the shared memory hash set into the global hash set if the block
  // cardinality is below the threshold
  if (cardinality < GROUPBY_CARDINALITY_THRESHOLD) {
    find_global_mapping(block, cardinality, global_set, shared_set_indices, global_mapping_index);
  }

  if (block.thread_rank() == 0) { block_cardinality[block.group_index().x] = cardinality; }
}
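// Worked example (illustrative values, not from this PR): consider a block whose rows carry the
// group keys {10, 20, 10, 30}. One possible outcome (exact ranks depend on thread scheduling) is
// local_mapping_index = {0, 1, 0, 2} for those rows, i.e. every row is mapped to the rank of its
// key's first occurrence within the block; shared_set_indices holds the three first-occurrence row
// indices, block_cardinality[block] == 3, and, since 3 < GROUPBY_CARDINALITY_THRESHOLD,
// global_mapping_index[block * GROUPBY_SHM_MAX_ELEMENTS + r] holds the position of unique key r in
// `global_set` for r in {0, 1, 2}.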

template <class SetRef>
cudf::size_type max_occupancy_grid_size(cudf::size_type n)
{
  cudf::size_type max_active_blocks{-1};
  CUDF_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
    &max_active_blocks, mapping_indices_kernel<SetRef>, GROUPBY_BLOCK_SIZE, 0));
  auto const grid_size  = max_active_blocks * cudf::detail::num_multiprocessors();
  auto const num_blocks = cudf::util::div_rounding_up_safe(n, GROUPBY_BLOCK_SIZE);
  return std::min(grid_size, num_blocks);
}
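// For illustration only (occupancy and block size are assumed values, not measurements): with
// 2 resident blocks per SM on a 108-SM device and a 128-thread block, grid_size = 216; for
// n = 1'000'000 rows, num_blocks = 7813, so the function returns min(216, 7813) = 216 and the
// kernel grid-strides over the remaining rows, whereas n = 10'000 gives num_blocks = 79 and a
// returned grid size of 79.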

template <class SetRef>
void compute_mapping_indices(cudf::size_type grid_size,
                             cudf::size_type num,
                             SetRef global_set,
                             bitmask_type const* row_bitmask,
                             bool skip_rows_with_nulls,
                             cudf::size_type* local_mapping_index,
                             cudf::size_type* global_mapping_index,
                             cudf::size_type* block_cardinality,
                             cuda::std::atomic_flag* needs_global_memory_fallback,
                             rmm::cuda_stream_view stream)
{
  mapping_indices_kernel<<<grid_size, GROUPBY_BLOCK_SIZE, 0, stream>>>(
    num,
    global_set,
    row_bitmask,
    skip_rows_with_nulls,
    local_mapping_index,
    global_mapping_index,
    block_cardinality,
    needs_global_memory_fallback);
}
} // namespace cudf::groupby::detail::hash
43 changes: 43 additions & 0 deletions cpp/src/groupby/hash/compute_mapping_indices.hpp
@@ -0,0 +1,43 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

#include <cudf/types.hpp>

#include <rmm/cuda_stream_view.hpp>

#include <cuda/std/atomic>

namespace cudf::groupby::detail::hash {
/**
 * @brief Computes the grid size for the mapping-indices kernel: the maximum number of blocks that
 * can be resident on the underlying device, capped by the number of blocks needed to cover `n`
 * rows
 */
template <class SetRef>
[[nodiscard]] cudf::size_type max_occupancy_grid_size(cudf::size_type n);

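/**
 * @brief Launches the mapping-indices kernel: writes the block-wise rank of every input row to
 * `local_mapping_index`, the global hash set position of each block-local unique key to
 * `global_mapping_index` (for blocks below the cardinality threshold), and the number of unique
 * keys per block to `block_cardinality`; sets `needs_global_memory_fallback` if any block exceeds
 * `GROUPBY_CARDINALITY_THRESHOLD`
 */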
template <class SetRef>
void compute_mapping_indices(cudf::size_type grid_size,
                             cudf::size_type num,
                             SetRef global_set,
                             bitmask_type const* row_bitmask,
                             bool skip_rows_with_nulls,
                             cudf::size_type* local_mapping_index,
                             cudf::size_type* global_mapping_index,
                             cudf::size_type* block_cardinality,
                             cuda::std::atomic_flag* needs_global_memory_fallback,
                             rmm::cuda_stream_view stream);
} // namespace cudf::groupby::detail::hash
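
For context, the sketch below (not part of this diff) shows how a shared memory groupby caller might wire the two entry points above together. It is a minimal illustration under assumptions: the helper name launch_mapping_indices is hypothetical, the caller already owns a SetRef bound to cuco::insert_and_find and a device- or pinned-host-resident cuda::std::atomic_flag, and GROUPBY_SHM_MAX_ELEMENTS is taken from helpers.cuh as in the kernel.

// Illustrative sketch only -- not part of this PR.
#include "compute_mapping_indices.hpp"
#include "helpers.cuh"  // assumed source of GROUPBY_SHM_MAX_ELEMENTS

#include <cudf/types.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>

#include <cuda/std/atomic>

#include <cstddef>

namespace cudf::groupby::detail::hash {
template <class SetRef>
void launch_mapping_indices(cudf::size_type num_rows,  // hypothetical helper
                            SetRef global_set,
                            bitmask_type const* row_bitmask,
                            bool skip_rows_with_nulls,
                            cuda::std::atomic_flag* needs_fallback,  // caller-owned
                            rmm::cuda_stream_view stream)
{
  // Size the grid from kernel occupancy, capped by the number of blocks needed to cover the input
  auto const grid_size = max_occupancy_grid_size<SetRef>(num_rows);

  // One block-wise rank per input row; one global-set slot per (block, shared-set element) pair,
  // matching the kernel's `block * GROUPBY_SHM_MAX_ELEMENTS + idx` indexing; one count per block
  rmm::device_uvector<cudf::size_type> local_mapping_index(num_rows, stream);
  rmm::device_uvector<cudf::size_type> global_mapping_index(
    static_cast<std::size_t>(grid_size) * GROUPBY_SHM_MAX_ELEMENTS, stream);
  rmm::device_uvector<cudf::size_type> block_cardinality(grid_size, stream);

  compute_mapping_indices(grid_size,
                          num_rows,
                          global_set,
                          row_bitmask,
                          skip_rows_with_nulls,
                          local_mapping_index.data(),
                          global_mapping_index.data(),
                          block_cardinality.data(),
                          needs_fallback,
                          stream);

  // Downstream code would check `needs_fallback` (after synchronizing the stream) and use a
  // global-memory aggregation path for blocks that exceeded GROUPBY_CARDINALITY_THRESHOLD.
}
}  // namespace cudf::groupby::detail::hash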
35 changes: 35 additions & 0 deletions cpp/src/groupby/hash/compute_mapping_indices_null.cu
@@ -0,0 +1,35 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "compute_mapping_indices.cuh"
#include "compute_mapping_indices.hpp"

namespace cudf::groupby::detail::hash {
template cudf::size_type
max_occupancy_grid_size<nullable_hash_set_ref_t<cuco::insert_and_find_tag>>(cudf::size_type n);

template void compute_mapping_indices<nullable_hash_set_ref_t<cuco::insert_and_find_tag>>(
  cudf::size_type grid_size,
  cudf::size_type num,
  nullable_hash_set_ref_t<cuco::insert_and_find_tag> global_set,
  bitmask_type const* row_bitmask,
  bool skip_rows_with_nulls,
  cudf::size_type* local_mapping_index,
  cudf::size_type* global_mapping_index,
  cudf::size_type* block_cardinality,
  cuda::std::atomic_flag* needs_global_memory_fallback,
  rmm::cuda_stream_view stream);
} // namespace cudf::groupby::detail::hash