From ba5e7b406b50fd845ac2bbf366eaff5c212dd84a Mon Sep 17 00:00:00 2001 From: Nicholas Gates Date: Wed, 4 Dec 2024 18:09:11 -0500 Subject: [PATCH] Narrow indices types during compression (#1558) Fixes #1557 --- Cargo.lock | 1 + docs/quickstart.rst | 4 +- encodings/datetime-parts/src/compute/mod.rs | 70 ++++++++++--- encodings/fsst/src/canonical.rs | 32 +++--- vortex-array/src/array/sparse/mod.rs | 18 ++-- vortex-array/src/compute/cast.rs | 30 ++++-- vortex-dtype/src/dtype.rs | 6 -- vortex-sampling-compressor/Cargo.toml | 1 + .../src/compressors/date_time_parts.rs | 22 +++-- .../src/compressors/dict.rs | 8 +- .../src/compressors/fsst.rs | 3 +- .../src/compressors/list.rs | 3 +- .../src/compressors/runend.rs | 8 +- .../src/compressors/runend_bool.rs | 5 +- .../src/compressors/sparse.rs | 3 +- .../src/compressors/varbin.rs | 3 +- vortex-sampling-compressor/src/downscale.rs | 98 +++++++++++++++++++ vortex-sampling-compressor/src/lib.rs | 1 + vortex-scalar/src/primitive.rs | 2 +- 19 files changed, 240 insertions(+), 78 deletions(-) create mode 100644 vortex-sampling-compressor/src/downscale.rs diff --git a/Cargo.lock b/Cargo.lock index 4d427e48b9..91bde3b5b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5063,6 +5063,7 @@ dependencies = [ "fsst-rs", "itertools 0.13.0", "log", + "num-traits", "rand", "vortex-alp", "vortex-array", diff --git a/docs/quickstart.rst b/docs/quickstart.rst index 07d471ed8c..b79fc6c5b0 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -46,9 +46,9 @@ Use :func:`~vortex.encoding.compress` to compress the Vortex array and check the >>> cvtx = vortex.compress(vtx) >>> cvtx.nbytes - 17780 + 16835 >>> cvtx.nbytes / vtx.nbytes - 0.126... + 0.119... Vortex uses nearly ten times fewer bytes than Arrow. Fewer bytes means more of your data fits in cache and RAM. 
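The improvement comes from narrowing the integer types of auxiliary buffers (patch indices, dictionary codes, run ends, offsets, and uncompressed lengths) to the smallest width their observed range requires, instead of defaulting to 64-bit values. As a rough sketch in plain Rust (illustrative only, not code from this patch), the width-selection rule that the new downscale pass drives from the array's min/max statistics looks like this:

    /// Illustrative only: pick the narrowest unsigned width (in bytes) that can
    /// hold every index up to `max`, mirroring the rule the downscale pass
    /// applies using the array's computed min/max statistics.
    fn narrowest_unsigned_width(max: u64) -> usize {
        if max <= u8::MAX as u64 {
            1
        } else if max <= u16::MAX as u64 {
            2
        } else if max <= u32::MAX as u64 {
            4
        } else {
            8
        }
    }

    fn main() {
        // Offsets that top out below 2^16 need two bytes per element instead of
        // eight, before any further lightweight encoding is applied.
        assert_eq!(narrowest_unsigned_width(42_000), 2);
    }

Signed values follow the same idea with an additional lower-bound check, which is why the real pass computes both min and max statistics.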
diff --git a/encodings/datetime-parts/src/compute/mod.rs b/encodings/datetime-parts/src/compute/mod.rs
index a1863e28b8..31b523c104 100644
--- a/encodings/datetime-parts/src/compute/mod.rs
+++ b/encodings/datetime-parts/src/compute/mod.rs
@@ -1,17 +1,17 @@
 mod filter;
 mod take;
 
-use itertools::Itertools as _;
 use vortex_array::array::{PrimitiveArray, TemporalArray};
 use vortex_array::compute::{
-    scalar_at, slice, ComputeVTable, FilterFn, ScalarAtFn, SliceFn, TakeFn,
+    scalar_at, slice, try_cast, ComputeVTable, FilterFn, ScalarAtFn, SliceFn, TakeFn,
 };
 use vortex_array::validity::ArrayValidity;
 use vortex_array::{ArrayDType, ArrayData, IntoArrayData, IntoArrayVariant};
 use vortex_datetime_dtype::{TemporalMetadata, TimeUnit};
-use vortex_dtype::DType;
-use vortex_error::{vortex_bail, VortexResult};
-use vortex_scalar::Scalar;
+use vortex_dtype::Nullability::NonNullable;
+use vortex_dtype::{DType, PType};
+use vortex_error::{vortex_bail, VortexExpect, VortexResult};
+use vortex_scalar::{PrimitiveScalar, Scalar};
 
 use crate::{DateTimePartsArray, DateTimePartsEncoding};
 
@@ -106,17 +106,55 @@ pub fn decode_to_temporal(array: &DateTimePartsArray) -> VortexResult<TemporalArray> {
         TimeUnit::D => vortex_bail!(InvalidArgument: "cannot decode into TimeUnit::D"),
     };
 
-    let days_buf = array.days().into_primitive()?;
-    let seconds_buf = array.seconds().into_primitive()?;
-    let subsecond_buf = array.subsecond().into_primitive()?;
-
-    let values = days_buf
-        .maybe_null_slice::<i64>()
-        .iter()
-        .zip_eq(seconds_buf.maybe_null_slice::<i64>().iter())
-        .zip_eq(subsecond_buf.maybe_null_slice::<i64>().iter())
-        .map(|((d, s), ss)| d * 86_400 * divisor + s * divisor + ss)
-        .collect::<Vec<_>>();
+    let days_buf = try_cast(
+        array.days(),
+        &DType::Primitive(PType::I64, array.dtype().nullability()),
+    )?
+    .into_primitive()?;
+    let mut values: Vec<i64> = days_buf
+        .into_maybe_null_slice::<i64>()
+        .into_iter()
+        .map(|d| d * 86_400 * divisor)
+        .collect();
+
+    if let Some(seconds) = array.seconds().as_constant() {
+        let seconds =
+            PrimitiveScalar::try_from(&seconds.cast(&DType::Primitive(PType::I64, NonNullable))?)?
+                .typed_value::<i64>()
+                .vortex_expect("non-nullable");
+        for v in values.iter_mut() {
+            *v += seconds * divisor;
+        }
+    } else {
+        let seconds_buf = try_cast(array.seconds(), &DType::Primitive(PType::U32, NonNullable))?
+            .into_primitive()?;
+        for (v, second) in values.iter_mut().zip(seconds_buf.maybe_null_slice::<u32>()) {
+            *v += (*second as i64) * divisor;
+        }
+    }
+
+    if let Some(subseconds) = array.subsecond().as_constant() {
+        let subseconds = PrimitiveScalar::try_from(
+            &subseconds.cast(&DType::Primitive(PType::I64, NonNullable))?,
+        )?
+        .typed_value::<i64>()
+        .vortex_expect("non-nullable");
+        for v in values.iter_mut() {
+            *v += subseconds;
+        }
+    } else {
+        let subsecond_buf = try_cast(
+            array.subsecond(),
+            &DType::Primitive(PType::I64, NonNullable),
+        )?
+        .into_primitive()?;
+        for (v, subsecond) in values
+            .iter_mut()
+            .zip(subsecond_buf.maybe_null_slice::<i64>())
+        {
+            *v += *subsecond;
+        }
+    }
 
     Ok(TemporalArray::new_timestamp(
         PrimitiveArray::from_vec(values, array.validity()).into_array(),
diff --git a/encodings/fsst/src/canonical.rs b/encodings/fsst/src/canonical.rs
index 425fa33922..ee7e4866be 100644
--- a/encodings/fsst/src/canonical.rs
+++ b/encodings/fsst/src/canonical.rs
@@ -1,9 +1,11 @@
 use arrow_array::builder::make_view;
 use arrow_buffer::Buffer;
 use vortex_array::array::{PrimitiveArray, VarBinArray, VarBinViewArray};
+use vortex_array::variants::PrimitiveArrayTrait;
 use vortex_array::{
     ArrayDType, ArrayData, Canonical, IntoArrayData, IntoArrayVariant, IntoCanonical,
 };
+use vortex_dtype::match_each_integer_ptype;
 use vortex_error::VortexResult;
 
 use crate::FSSTArray;
@@ -33,24 +35,26 @@ impl IntoCanonical for FSSTArray {
             .uncompressed_lengths()
             .into_canonical()?
             .into_primitive()?;
-        let uncompressed_lens_slice = uncompressed_lens_array.maybe_null_slice::();
 
         // Directly create the binary views.
-        let views: Vec<u128> = uncompressed_lens_slice
-            .iter()
-            .scan(0, |offset, len| {
-                let str_start = *offset;
-                let str_end = *offset + len;
-
-                *offset += len;
-
-                Some(make_view(
-                    &uncompressed_bytes[(str_start as usize)..(str_end as usize)],
-                    0u32,
-                    str_start as u32,
-                ))
-            })
-            .collect();
+        let views: Vec<u128> = match_each_integer_ptype!(uncompressed_lens_array.ptype(), |$P| {
+            uncompressed_lens_array.maybe_null_slice::<$P>()
+                .iter()
+                .map(|&len| len as usize)
+                .scan(0, |offset, len| {
+                    let str_start = *offset;
+                    let str_end = *offset + len;
+
+                    *offset += len;
+
+                    Some(make_view(
+                        &uncompressed_bytes[str_start..str_end],
+                        0u32,
+                        str_start as u32,
+                    ))
+                })
+                .collect()
+        });
 
         let views_array: ArrayData = Buffer::from(views).into();
         let uncompressed_bytes_array = PrimitiveArray::from(uncompressed_bytes).into_array();
diff --git a/vortex-array/src/array/sparse/mod.rs b/vortex-array/src/array/sparse/mod.rs
index 0c6f4db45d..ba7d41e454 100644
--- a/vortex-array/src/array/sparse/mod.rs
+++ b/vortex-array/src/array/sparse/mod.rs
@@ -1,7 +1,8 @@
 use std::fmt::{Debug, Display};
 
 use ::serde::{Deserialize, Serialize};
-use vortex_dtype::{match_each_integer_ptype, DType};
+use vortex_dtype::Nullability::NonNullable;
+use vortex_dtype::{match_each_integer_ptype, DType, PType};
 use vortex_error::{vortex_bail, vortex_panic, VortexExpect as _, VortexResult};
 use vortex_scalar::{Scalar, ScalarValue};
 
@@ -27,8 +28,8 @@ pub struct SparseMetadata {
     // Offset value for patch indices as a result of slicing
     indices_offset: usize,
     indices_len: usize,
+    indices_ptype: PType,
     fill_value: ScalarValue,
-    u64_indices: bool,
 }
 
 impl Display for SparseMetadata {
@@ -54,9 +55,6 @@ impl SparseArray {
         indices_offset: usize,
         fill_value: Scalar,
     ) -> VortexResult<Self> {
-        if !matches!(indices.dtype(), &DType::IDX | &DType::IDX_32) {
-            vortex_bail!("Cannot use {} as indices", indices.dtype());
-        }
         if fill_value.dtype() != values.dtype() {
             vortex_bail!(
                 "fill value, {:?}, should be instance of values dtype, {}",
@@ -80,14 +78,16 @@
             }
         }
 
+        let indices_ptype = PType::try_from(indices.dtype())?;
+
         Self::try_from_parts(
             values.dtype().clone(),
             len,
             SparseMetadata {
                 indices_offset,
                 indices_len: indices.len(),
+                indices_ptype,
                 fill_value: fill_value.into_value(),
-                u64_indices: matches!(indices.dtype(), &DType::IDX),
             },
             [indices, values].into(),
             StatsSet::default(),
@@ -111,11 +111,7 @@ impl SparseArray {
         self.as_ref()
             .child(
                 0,
-                if self.metadata().u64_indices {
&DType::IDX - } else { - &DType::IDX_32 - }, + &DType::Primitive(self.metadata().indices_ptype, NonNullable), self.metadata().indices_len, ) .vortex_expect("Missing indices array in SparseArray") diff --git a/vortex-array/src/compute/cast.rs b/vortex-array/src/compute/cast.rs index d8c287d51e..7dedabcff9 100644 --- a/vortex-array/src/compute/cast.rs +++ b/vortex-array/src/compute/cast.rs @@ -1,8 +1,8 @@ use vortex_dtype::DType; -use vortex_error::{vortex_err, VortexError, VortexResult}; +use vortex_error::{vortex_bail, vortex_err, VortexError, VortexResult}; use crate::encoding::Encoding; -use crate::{ArrayDType, ArrayData}; +use crate::{ArrayDType, ArrayData, IntoArrayData, IntoCanonical}; pub trait CastFn { fn cast(&self, array: &Array, dtype: &DType) -> VortexResult; @@ -34,9 +34,25 @@ pub fn try_cast(array: impl AsRef, dtype: &DType) -> VortexResult Nullability { self.is_nullable().into() diff --git a/vortex-sampling-compressor/Cargo.toml b/vortex-sampling-compressor/Cargo.toml index 177d8421e5..396890c768 100644 --- a/vortex-sampling-compressor/Cargo.toml +++ b/vortex-sampling-compressor/Cargo.toml @@ -18,6 +18,7 @@ arbitrary = { workspace = true, optional = true } fsst-rs = { workspace = true } itertools = { workspace = true } log = { workspace = true } +num-traits = { workspace = true } rand = { workspace = true } vortex-alp = { workspace = true } vortex-array = { workspace = true } diff --git a/vortex-sampling-compressor/src/compressors/date_time_parts.rs b/vortex-sampling-compressor/src/compressors/date_time_parts.rs index fbb9938a41..6a2f3a467f 100644 --- a/vortex-sampling-compressor/src/compressors/date_time_parts.rs +++ b/vortex-sampling-compressor/src/compressors/date_time_parts.rs @@ -10,6 +10,7 @@ use vortex_datetime_parts::{ use vortex_error::VortexResult; use crate::compressors::{CompressedArray, CompressionTree, EncodingCompressor}; +use crate::downscale::downscale_integer_array; use crate::{constants, SamplingCompressor}; #[derive(Debug)] @@ -48,15 +49,18 @@ impl EncodingCompressor for DateTimePartsCompressor { subseconds, } = split_temporal(TemporalArray::try_from(array.clone())?)?; - let days = ctx - .named("days") - .compress(&days, like.as_ref().and_then(|l| l.child(0)))?; - let seconds = ctx - .named("seconds") - .compress(&seconds, like.as_ref().and_then(|l| l.child(1)))?; - let subsecond = ctx - .named("subsecond") - .compress(&subseconds, like.as_ref().and_then(|l| l.child(2)))?; + let days = ctx.named("days").compress( + &downscale_integer_array(days)?, + like.as_ref().and_then(|l| l.child(0)), + )?; + let seconds = ctx.named("seconds").compress( + &downscale_integer_array(seconds)?, + like.as_ref().and_then(|l| l.child(1)), + )?; + let subsecond = ctx.named("subsecond").compress( + &downscale_integer_array(subseconds)?, + like.as_ref().and_then(|l| l.child(2)), + )?; Ok(CompressedArray::compressed( DateTimePartsArray::try_new( array.dtype().clone(), diff --git a/vortex-sampling-compressor/src/compressors/dict.rs b/vortex-sampling-compressor/src/compressors/dict.rs index c088edbce6..112861deef 100644 --- a/vortex-sampling-compressor/src/compressors/dict.rs +++ b/vortex-sampling-compressor/src/compressors/dict.rs @@ -12,6 +12,7 @@ use vortex_dict::{ use vortex_error::VortexResult; use crate::compressors::{CompressedArray, CompressionTree, EncodingCompressor}; +use crate::downscale::downscale_integer_array; use crate::{constants, SamplingCompressor}; #[derive(Debug)] @@ -70,9 +71,10 @@ impl EncodingCompressor for DictCompressor { }; let (codes, values) = ( - 
ctx.auxiliary("codes") - .excluding(self) - .compress(&codes, like.as_ref().and_then(|l| l.child(0)))?, + ctx.auxiliary("codes").excluding(self).compress( + &downscale_integer_array(codes)?, + like.as_ref().and_then(|l| l.child(0)), + )?, ctx.named("values") .excluding(self) .compress(&values, like.as_ref().and_then(|l| l.child(1)))?, diff --git a/vortex-sampling-compressor/src/compressors/fsst.rs b/vortex-sampling-compressor/src/compressors/fsst.rs index f144357477..070b047f7e 100644 --- a/vortex-sampling-compressor/src/compressors/fsst.rs +++ b/vortex-sampling-compressor/src/compressors/fsst.rs @@ -17,6 +17,7 @@ use super::delta::DeltaCompressor; use super::r#for::FoRCompressor; use super::varbin::VarBinCompressor; use super::{CompressedArray, CompressionTree, EncoderMetadata, EncodingCompressor}; +use crate::downscale::downscale_integer_array; use crate::{constants, SamplingCompressor}; #[derive(Debug)] @@ -109,7 +110,7 @@ impl EncodingCompressor for FSSTCompressor { .auxiliary("uncompressed_lengths") .excluding(self) .compress( - &fsst_array.uncompressed_lengths(), + &downscale_integer_array(fsst_array.uncompressed_lengths())?, like.as_ref().and_then(|l| l.child(3)), )?; diff --git a/vortex-sampling-compressor/src/compressors/list.rs b/vortex-sampling-compressor/src/compressors/list.rs index a47427878b..9501e1ba59 100644 --- a/vortex-sampling-compressor/src/compressors/list.rs +++ b/vortex-sampling-compressor/src/compressors/list.rs @@ -6,6 +6,7 @@ use vortex_array::{ArrayData, IntoArrayData}; use vortex_error::VortexResult; use crate::compressors::{CompressedArray, CompressionTree, EncodingCompressor}; +use crate::downscale::downscale_integer_array; use crate::{constants, SamplingCompressor}; #[derive(Debug)] @@ -36,7 +37,7 @@ impl EncodingCompressor for ListCompressor { like.as_ref().and_then(|l| l.child(0)), )?; let compressed_offsets = ctx.auxiliary("offsets").compress( - &list_array.offsets(), + &downscale_integer_array(list_array.offsets())?, like.as_ref().and_then(|l| l.child(1)), )?; Ok(CompressedArray::compressed( diff --git a/vortex-sampling-compressor/src/compressors/runend.rs b/vortex-sampling-compressor/src/compressors/runend.rs index e53f09e63b..e250d14ecf 100644 --- a/vortex-sampling-compressor/src/compressors/runend.rs +++ b/vortex-sampling-compressor/src/compressors/runend.rs @@ -8,6 +8,7 @@ use vortex_runend::compress::runend_encode; use vortex_runend::{RunEndArray, RunEndEncoding}; use crate::compressors::{CompressedArray, CompressionTree, EncodingCompressor}; +use crate::downscale::downscale_integer_array; use crate::{constants, SamplingCompressor}; pub const DEFAULT_RUN_END_COMPRESSOR: RunEndCompressor = RunEndCompressor { ree_threshold: 2.0 }; @@ -52,9 +53,10 @@ impl EncodingCompressor for RunEndCompressor { let primitive_array = array.clone().into_primitive()?; let (ends, values) = runend_encode(&primitive_array); - let compressed_ends = ctx - .auxiliary("ends") - .compress(&ends.into_array(), like.as_ref().and_then(|l| l.child(0)))?; + let compressed_ends = ctx.auxiliary("ends").compress( + &downscale_integer_array(ends.into_array())?, + like.as_ref().and_then(|l| l.child(0)), + )?; let compressed_values = ctx .named("values") .excluding(self) diff --git a/vortex-sampling-compressor/src/compressors/runend_bool.rs b/vortex-sampling-compressor/src/compressors/runend_bool.rs index bc1fef4d8e..d02aad067b 100644 --- a/vortex-sampling-compressor/src/compressors/runend_bool.rs +++ b/vortex-sampling-compressor/src/compressors/runend_bool.rs @@ -8,6 +8,7 @@ use 
vortex_runend_bool::compress::runend_bool_encode_slice;
 use vortex_runend_bool::{RunEndBoolArray, RunEndBoolEncoding};
 
 use crate::compressors::{CompressedArray, CompressionTree, EncodingCompressor};
+use crate::downscale::downscale_integer_array;
 use crate::{constants, SamplingCompressor};
 
 #[derive(Debug)]
@@ -39,11 +40,11 @@ impl EncodingCompressor for RunEndBoolCompressor {
     ) -> VortexResult<CompressedArray<'a>> {
         let bool_array = array.clone().into_bool()?;
         let (ends, start) = runend_bool_encode_slice(&bool_array.boolean_buffer());
-        let ends = PrimitiveArray::from(ends);
+        let ends = downscale_integer_array(PrimitiveArray::from(ends).into_array())?;
 
         let compressed_ends = ctx
             .auxiliary("ends")
-            .compress(&ends.into_array(), like.as_ref().and_then(|l| l.child(0)))?;
+            .compress(&ends, like.as_ref().and_then(|l| l.child(0)))?;
 
         Ok(CompressedArray::compressed(
             RunEndBoolArray::try_new(compressed_ends.array, start, bool_array.validity())?
diff --git a/vortex-sampling-compressor/src/compressors/sparse.rs b/vortex-sampling-compressor/src/compressors/sparse.rs
index b482b1231f..b597f32e4e 100644
--- a/vortex-sampling-compressor/src/compressors/sparse.rs
+++ b/vortex-sampling-compressor/src/compressors/sparse.rs
@@ -6,6 +6,7 @@ use vortex_array::{ArrayData, ArrayLen, IntoArrayData};
 use vortex_error::VortexResult;
 
 use crate::compressors::{CompressedArray, CompressionTree, EncodingCompressor};
+use crate::downscale::downscale_integer_array;
 use crate::{constants, SamplingCompressor};
 
 #[derive(Debug)]
@@ -32,7 +33,7 @@ impl EncodingCompressor for SparseCompressor {
     ) -> VortexResult<CompressedArray<'a>> {
         let sparse_array = SparseArray::try_from(array.clone())?;
         let indices = ctx.auxiliary("indices").compress(
-            &sparse_array.indices(),
+            &downscale_integer_array(sparse_array.indices())?,
             like.as_ref().and_then(|l| l.child(0)),
         )?;
         let values = ctx.named("values").compress(
diff --git a/vortex-sampling-compressor/src/compressors/varbin.rs b/vortex-sampling-compressor/src/compressors/varbin.rs
index 984f08e0b5..20ec03308b 100644
--- a/vortex-sampling-compressor/src/compressors/varbin.rs
+++ b/vortex-sampling-compressor/src/compressors/varbin.rs
@@ -6,6 +6,7 @@ use vortex_array::{ArrayDType, ArrayData, IntoArrayData};
 use vortex_error::VortexResult;
 
 use crate::compressors::{CompressedArray, CompressionTree, EncodingCompressor};
+use crate::downscale::downscale_integer_array;
 use crate::{constants, SamplingCompressor};
 
 #[derive(Debug)]
@@ -32,7 +33,7 @@ impl EncodingCompressor for VarBinCompressor {
     ) -> VortexResult<CompressedArray<'a>> {
         let varbin_array = VarBinArray::try_from(array.clone())?;
         let offsets = ctx.auxiliary("offsets").compress(
-            &varbin_array.offsets(),
+            &downscale_integer_array(varbin_array.offsets())?,
             like.as_ref().and_then(|l| l.child(0)),
         )?;
         Ok(CompressedArray::compressed(
diff --git a/vortex-sampling-compressor/src/downscale.rs b/vortex-sampling-compressor/src/downscale.rs
new file mode 100644
index 0000000000..d0544818f6
--- /dev/null
+++ b/vortex-sampling-compressor/src/downscale.rs
@@ -0,0 +1,98 @@
+use vortex_array::array::{PrimitiveArray, PrimitiveEncoding};
+use vortex_array::compute::try_cast;
+use vortex_array::encoding::EncodingVTable;
+use vortex_array::stats::{ArrayStatistics, Stat};
+use vortex_array::{ArrayDType, ArrayData, IntoArrayData, IntoArrayVariant};
+use vortex_dtype::{DType, PType};
+use vortex_error::{vortex_err, VortexResult};
+
+/// Downscale a primitive array to the narrowest PType that fits all the values.
+pub fn downscale_integer_array(array: ArrayData) -> VortexResult<ArrayData> {
+    if !array.is_encoding(PrimitiveEncoding.id()) {
+        // This can happen if e.g. the array is ConstantArray.
+        return Ok(array);
+    }
+    let array = PrimitiveArray::try_from(array)?;
+
+    let min = array
+        .statistics()
+        .compute(Stat::Min)
+        .ok_or_else(|| vortex_err!("Failed to compute min on primitive array"))?;
+    let max = array
+        .statistics()
+        .compute(Stat::Max)
+        .ok_or_else(|| vortex_err!("Failed to compute max on primitive array"))?;
+
+    // If we can't cast to i64, then leave the array as its original type.
+    // It's too big to downcast anyway.
+    let Ok(min) = min.cast(&DType::Primitive(PType::I64, array.dtype().nullability())) else {
+        return Ok(array.into_array());
+    };
+    let Ok(max) = max.cast(&DType::Primitive(PType::I64, array.dtype().nullability())) else {
+        return Ok(array.into_array());
+    };
+
+    downscale_primitive_integer_array(array, i64::try_from(min)?, i64::try_from(max)?)
+        .map(|a| a.into_array())
+}
+
+/// Downscale a primitive array to the narrowest PType that fits all the values.
+fn downscale_primitive_integer_array(
+    array: PrimitiveArray,
+    min: i64,
+    max: i64,
+) -> VortexResult<PrimitiveArray> {
+    if min < 0 || max < 0 {
+        // Signed
+        if min >= i8::MIN as i64 && max <= i8::MAX as i64 {
+            return try_cast(
+                &array,
+                &DType::Primitive(PType::I8, array.dtype().nullability()),
+            )?
+            .into_primitive();
+        }
+
+        if min >= i16::MIN as i64 && max <= i16::MAX as i64 {
+            return try_cast(
+                &array,
+                &DType::Primitive(PType::I16, array.dtype().nullability()),
+            )?
+            .into_primitive();
+        }
+
+        if min >= i32::MIN as i64 && max <= i32::MAX as i64 {
+            return try_cast(
+                &array,
+                &DType::Primitive(PType::I32, array.dtype().nullability()),
+            )?
+            .into_primitive();
+        }
+    } else {
+        // Unsigned
+        if max <= u8::MAX as i64 {
+            return try_cast(
+                &array,
+                &DType::Primitive(PType::U8, array.dtype().nullability()),
+            )?
+            .into_primitive();
+        }
+
+        if max <= u16::MAX as i64 {
+            return try_cast(
+                &array,
+                &DType::Primitive(PType::U16, array.dtype().nullability()),
+            )?
+            .into_primitive();
+        }
+
+        if max <= u32::MAX as i64 {
+            return try_cast(
+                &array,
+                &DType::Primitive(PType::U32, array.dtype().nullability()),
+            )?
+            .into_primitive();
+        }
+    }
+
+    Ok(array)
+}
diff --git a/vortex-sampling-compressor/src/lib.rs b/vortex-sampling-compressor/src/lib.rs
index 058739239d..1ca764d84f 100644
--- a/vortex-sampling-compressor/src/lib.rs
+++ b/vortex-sampling-compressor/src/lib.rs
@@ -39,6 +39,7 @@ use crate::compressors::zigzag::ZigZagCompressor;
 pub mod arbitrary;
 pub mod compressors;
 mod constants;
+mod downscale;
 mod sampling;
 mod sampling_compressor;
diff --git a/vortex-scalar/src/primitive.rs b/vortex-scalar/src/primitive.rs
index 054deca4ea..f0c5fa0979 100644
--- a/vortex-scalar/src/primitive.rs
+++ b/vortex-scalar/src/primitive.rs
@@ -71,7 +71,7 @@ impl<'a> PrimitiveScalar<'a> {
         match_each_native_ptype!(self.ptype(), |$T| {
             Ok(Scalar::primitive::<$Q>(
                 <$Q as NumCast>::from(self.typed_value::<$T>().expect("Invalid value"))
-                    .ok_or_else(|| vortex_err!("Can't cast {} scalar to {}", self.ptype, dtype))?,
+                    .ok_or_else(|| vortex_err!("Can't cast {} scalar {} to {}", self.ptype, self.typed_value::<$T>().expect("Invalid value"), dtype))?,
                 dtype.nullability(),
             ))
         })
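For reference, a unit test along the following lines (a sketch, not part of this patch) exercises the new pass end to end inside the crate-private downscale module; it uses only APIs that already appear above and assumes PType carries the usual PartialEq/Debug derives:

    // Hypothetical test one might append to vortex-sampling-compressor/src/downscale.rs.
    #[cfg(test)]
    mod tests {
        use vortex_array::array::PrimitiveArray;
        use vortex_array::variants::PrimitiveArrayTrait;
        use vortex_array::IntoArrayData;
        use vortex_dtype::PType;

        use super::downscale_integer_array;

        #[test]
        fn downscales_wide_indices() {
            // Indices materialized as u64 even though every value fits in a u8.
            let wide = PrimitiveArray::from(vec![0u64, 3, 7, 255]).into_array();
            let narrow = downscale_integer_array(wide).unwrap();
            // The pass should rewrite the buffer with the narrowest ptype, u8.
            assert_eq!(PrimitiveArray::try_from(narrow).unwrap().ptype(), PType::U8);
        }
    }

The expected ptype is U8 rather than I8 because both statistics are non-negative, which routes the array through the unsigned branch of downscale_primitive_integer_array.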