Commit

fix merge issues
pmattione-nvidia committed Oct 29, 2024
1 parent 86ade66 commit c039805
Showing 3 changed files with 7 additions and 9 deletions.
10 changes: 5 additions & 5 deletions cpp/src/io/parquet/decode_fixed.cu
@@ -870,7 +870,7 @@ inline __device__ void bool_plain_decode(page_state_s* s, state_buf* sb, int t,
   if (t == 0) { s->dict_pos = pos; }
 }
 
-template <int decode_block_size_t, typename stream_type>
+template <int rolling_buf_size, typename stream_type>
 __device__ int skip_decode(stream_type& parquet_stream, int num_to_skip, int t)
 {
   // it could be that (e.g.) we skip 5000 but starting at row 4000 we have a run of length 2000:
@@ -879,7 +879,7 @@ __device__ int skip_decode(stream_type& parquet_stream, int num_to_skip, int t)
   int num_skipped = parquet_stream.skip_decode(t, num_to_skip);
   while (num_skipped < num_to_skip) {
     // TODO: Instead of decoding, skip within the run to the appropriate location
-    auto const to_decode = min(2 * decode_block_size_t, num_to_skip - num_skipped);
+    auto const to_decode = min(rolling_buf_size, num_to_skip - num_skipped);
     num_skipped += parquet_stream.decode_next(t, to_decode);
     __syncthreads();
   }
@@ -1036,11 +1036,11 @@ CUDF_KERNEL void __launch_bounds__(decode_block_size_t, 8)
     auto const skipped_leaf_values = s->page.skipped_leaf_values;
     if (skipped_leaf_values > 0) {
       if (should_process_nulls) {
-        skip_decode<decode_block_size_t>(def_decoder, skipped_leaf_values, t);
+        skip_decode<rolling_buf_size>(def_decoder, skipped_leaf_values, t);
       }
-      processed_count = skip_decode<decode_block_size_t>(rep_decoder, skipped_leaf_values, t);
+      processed_count = skip_decode<rolling_buf_size>(rep_decoder, skipped_leaf_values, t);
       if constexpr (has_dict_t) {
-        skip_decode<decode_block_size_t>(dict_stream, skipped_leaf_values, t);
+        skip_decode<rolling_buf_size>(dict_stream, skipped_leaf_values, t);
       }
     }
   }
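
A side note on the pattern touched above: the skip loop decodes and discards values in chunks bounded by the rolling buffer capacity until the requested number of leaf values has been skipped. Below is a minimal host-side sketch of that chunked-skip idea, using a hypothetical toy_stream stand-in (skip_whole_runs, decode_next) rather than the real rle_stream API.

#include <algorithm>
#include <cstdio>

// Hypothetical stand-in for an RLE stream whose values surface through a
// rolling buffer of fixed capacity, so a large skip is consumed in chunks.
struct toy_stream {
  int remaining;  // values left in the stream

  // Skips only the values that fall on run boundaries (pretend half of them do).
  int skip_whole_runs(int count)
  {
    int const skipped = std::min(count / 2, remaining);
    remaining -= skipped;
    return skipped;
  }

  // Decodes (and discards) up to `count` values.
  int decode_next(int count)
  {
    int const decoded = std::min(count, remaining);
    remaining -= decoded;
    return decoded;
  }
};

// Chunked skip: fast-skip whole runs first, then decode the remainder in
// rolling_buf_size pieces so the rolling buffer never overflows.
int chunked_skip(toy_stream& s, int num_to_skip, int rolling_buf_size)
{
  int num_skipped = s.skip_whole_runs(num_to_skip);
  while (num_skipped < num_to_skip) {
    int const to_decode = std::min(rolling_buf_size, num_to_skip - num_skipped);
    int const decoded   = s.decode_next(to_decode);
    if (decoded == 0) { break; }  // stream exhausted
    num_skipped += decoded;
  }
  return num_skipped;
}

int main()
{
  toy_stream s{10000};
  std::printf("skipped %d values\n", chunked_skip(s, 5000, 256));
  return 0;
}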
2 changes: 0 additions & 2 deletions cpp/src/io/parquet/parquet_gpu.hpp
@@ -994,7 +994,6 @@ void DecodePageDataFixedDict(cudf::detail::hostdevice_span<PageInfo> pages,
  * @param[in] level_type_size Size in bytes of the type for level decoding
  * @param[in] has_nesting Whether or not the data contains nested (but not list) data.
  * @param[in] is_list Whether or not the data contains list data.
- * @param[in] is_list Whether or not the data contains list data.
  * @param[out] error_code Error code for kernel failures
  * @param[in] stream CUDA stream to use
  */
@@ -1005,7 +1004,6 @@ void DecodeSplitPageFixedWidthData(cudf::detail::hostdevice_span<PageInfo> pages,
                                    int level_type_size,
                                    bool has_nesting,
                                    bool is_list,
-                                   bool is_list,
                                    kernel_error::pointer error_code,
                                    rmm::cuda_stream_view stream);

4 changes: 2 additions & 2 deletions cpp/src/io/parquet/rle_stream.cuh
@@ -216,7 +216,7 @@ struct rle_stream {
     decode_index = -1;  // signals the first iteration. Nothing to decode.
   }
 
-  __device__ inline int get_rle_run_info(rle_run<level_t>& run)
+  __device__ inline int get_rle_run_info(rle_run& run)
   {
     run.start = cur;
     run.level_run = get_vlq32(run.start, end);
@@ -383,7 +383,7 @@ struct rle_stream {
     // started basically we're setting up the rle_stream vars necessary to start fill_run_batch for
     // the first time
     while (cur < end) {
-      rle_run<level_t> run;
+      rle_run run;
       int run_bytes = get_rle_run_info(run);
 
       if ((output_pos + run.size) > target_count) {
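
For background on the run being parsed here: a Parquet RLE/bit-packed hybrid run starts with a ULEB128 varint header whose low bit distinguishes a bit-packed (literal) run from a repeated run. The sketch below is a simplified host-side illustration of that header decoding, not the cuDF get_vlq32/get_rle_run_info implementation; read_vlq32 and the example buffer are assumptions for the sketch.

#include <cstdint>
#include <cstdio>

// Reads a ULEB128 varint, the encoding used for the RLE/bit-packed run header.
uint32_t read_vlq32(uint8_t const*& cur, uint8_t const* end)
{
  uint32_t value = 0;
  int shift = 0;
  while (cur < end) {
    uint8_t const byte = *cur++;
    value |= static_cast<uint32_t>(byte & 0x7f) << shift;
    if ((byte & 0x80) == 0) { break; }  // high bit clear -> last varint byte
    shift += 7;
  }
  return value;
}

int main()
{
  // Header byte 0x2a = 42: low bit 0 -> repeated (RLE) run of 42 >> 1 = 21 values.
  uint8_t const buf[] = {0x2a};
  uint8_t const* cur = buf;
  uint32_t const header = read_vlq32(cur, buf + sizeof(buf));
  bool const is_literal = (header & 1) != 0;              // low bit 1 -> bit-packed run
  uint32_t const size = is_literal ? (header >> 1) * 8    // stored in groups of 8 values
                                   : (header >> 1);       // repeat count
  std::printf("%s run of %u values\n", is_literal ? "bit-packed" : "repeated", size);
  return 0;
}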
