|
| 1 | +// SPDX-License-Identifier: Apache-2.0 |
| 2 | +// SPDX-FileCopyrightText: Copyright the Vortex contributors |
| 3 | + |
| 4 | +use std::mem::MaybeUninit; |
| 5 | +use std::sync::Arc; |
| 6 | + |
| 7 | +use fastlanes::BitPacking; |
| 8 | +use vortex_array::ExecutionCtx; |
| 9 | +use vortex_array::arrays::FilterArray; |
| 10 | +use vortex_array::arrays::FilterVTable; |
| 11 | +use vortex_array::kernel::ExecuteParentKernel; |
| 12 | +use vortex_array::kernel::ParentKernelSet; |
| 13 | +use vortex_array::matchers::Exact; |
| 14 | +use vortex_buffer::BufferMut; |
| 15 | +use vortex_compute::filter::Filter; |
| 16 | +use vortex_dtype::NativePType; |
| 17 | +use vortex_dtype::PType; |
| 18 | +use vortex_dtype::UnsignedPType; |
| 19 | +use vortex_dtype::match_each_integer_ptype; |
| 20 | +use vortex_error::VortexResult; |
| 21 | +use vortex_mask::Mask; |
| 22 | +use vortex_mask::MaskValues; |
| 23 | +use vortex_vector::Vector; |
| 24 | +use vortex_vector::VectorMutOps; |
| 25 | +use vortex_vector::primitive::PVector; |
| 26 | +use vortex_vector::primitive::PVectorMut; |
| 27 | +use vortex_vector::primitive::PrimitiveVector; |
| 28 | + |
| 29 | +use crate::BitPackedArray; |
| 30 | +use crate::BitPackedVTable; |
| 31 | +use crate::bitpacking::vtable::kernels::UNPACK_CHUNK_THRESHOLD; |
| 32 | +use crate::bitpacking::vtable::kernels::chunked_indices; |
| 33 | + |
/// Parent-driven kernels registered for the bit-packed array: currently only the specialized
/// filter kernel, lifted into the generic parent-kernel representation.
pub(crate) const PARENT_KERNELS: ParentKernelSet<BitPackedVTable> =
    ParentKernelSet::new(&[ParentKernelSet::lift(&BitPackingFilterKernel)]);
| 36 | + |
/// The threshold over which it is faster to fully unpack the entire [`BitPackedArray`] and then
/// filter the result than to unpack only specific bitpacked values into the output buffer.
///
/// The returned value is a selection density in `[0, 1]`, keyed off the byte width of `T`.
pub const fn unpack_then_filter_threshold<T>() -> f64 {
    // TODO(connor): Where did these numbers come from? Add a public link after validating them.
    // These numbers probably don't work for in-place filtering either.
    //
    // The constants were derived on a GCP c2-standard-4 with a "Cascade Lake" CPU; element
    // widths above 8 bytes may warrant a higher threshold.
    let width = size_of::<T>();
    if width <= 2 {
        0.03
    } else if width == 4 {
        0.075
    } else {
        0.09
    }
}
| 51 | + |
/// Kernel to execute filtering directly on a bit-packed array.
///
/// Stateless, so a unit struct suffices; registered via [`PARENT_KERNELS`].
#[derive(Debug)]
struct BitPackingFilterKernel;
| 55 | + |
impl ExecuteParentKernel<BitPackedVTable> for BitPackingFilterKernel {
    type Parent = Exact<FilterVTable>;

    /// This kernel fires only when the parent is exactly a `FilterArray`.
    fn parent(&self) -> Self::Parent {
        Exact::from(&FilterVTable)
    }

    /// Filters the bit-packed child directly, returning `Ok(None)` to fall back to the default
    /// path (full decompression followed by a filter) whenever that is expected to be faster.
    fn execute_parent(
        &self,
        array: &BitPackedArray,
        parent: &FilterArray,
        _child_idx: usize,
        _ctx: &mut ExecutionCtx,
    ) -> VortexResult<Option<Vector>> {
        let values = match parent.filter_mask() {
            Mask::AllTrue(_) | Mask::AllFalse(_) => {
                // No optimization for full or empty mask
                return Ok(None);
            }
            Mask::Values(values) => values,
        };

        match_each_integer_ptype!(array.ptype(), |I| {
            // If the density is high enough, then we would rather decompress the whole array and then apply
            // a filter over decompressing values one by one.
            // NOTE: this early `return` exits `execute_parent`, not just the macro body.
            if values.density() > unpack_then_filter_threshold::<I>() {
                return Ok(None);
            }
        });

        // Dispatch on the physical type. FastLanes only implements unpacking for unsigned
        // integers, so each signed case unpacks as the matching unsigned type and reinterprets.
        let primitive_vector: PrimitiveVector = match array.ptype() {
            PType::U8 => filter_primitive::<u8>(array, values)?.into(),
            PType::U16 => filter_primitive::<u16>(array, values)?.into(),
            PType::U32 => filter_primitive::<u32>(array, values)?.into(),
            PType::U64 => filter_primitive::<u64>(array, values)?.into(),

            // Since the fastlanes crate only supports unsigned integers, and since we know that all
            // numbers are going to be non-negative, we can safely "cast" to unsigned and back.
            PType::I8 => {
                let pvector = filter_primitive::<u8>(array, values)?;
                // SAFETY: `u8` and `i8` have identical size and alignment; per the note above,
                // the packed values are non-negative, so the bit reinterpretation is lossless.
                unsafe { pvector.transmute::<i8>() }.into()
            }
            PType::I16 => {
                let pvector = filter_primitive::<u16>(array, values)?;
                // SAFETY: `u16` and `i16` have identical size and alignment (see note above).
                unsafe { pvector.transmute::<i16>() }.into()
            }
            PType::I32 => {
                let pvector = filter_primitive::<u32>(array, values)?;
                // SAFETY: `u32` and `i32` have identical size and alignment (see note above).
                unsafe { pvector.transmute::<i32>() }.into()
            }
            PType::I64 => {
                let pvector = filter_primitive::<u64>(array, values)?;
                // SAFETY: `u64` and `i64` have identical size and alignment (see note above).
                unsafe { pvector.transmute::<i64>() }.into()
            }
            other => {
                // NOTE(review): "checked above" presumably refers to `match_each_integer_ptype!`
                // rejecting non-integer ptypes — confirm the macro's behavior on float ptypes.
                unreachable!("Unsupported ptype {other} for bitpacking, we also checked this above")
            }
        };

        Ok(Some(primitive_vector.into()))
    }
}
| 118 | + |
| 119 | +/// Specialized filter kernel for primitive bit-packed arrays. |
| 120 | +/// |
| 121 | +/// Because the FastLanes bit-packing kernels are only implemented for unsigned types, the provided |
| 122 | +/// `U` should be promoted to the unsigned variant for any target bit width. |
| 123 | +/// For example, if the array is bit-packed `i16`, this function should be called with `U = u16`. |
| 124 | +/// |
| 125 | +/// This function fully decompresses the array for all but the most selective masks because the |
| 126 | +/// FastLanes decompression is so fast and the bookkeepping necessary to decompress individual |
| 127 | +/// elements is relatively slow. |
| 128 | +fn filter_primitive<U: UnsignedPType + BitPacking>( |
| 129 | + array: &BitPackedArray, |
| 130 | + selection: &Arc<MaskValues>, |
| 131 | +) -> VortexResult<PVector<U>> { |
| 132 | + let values = filter_with_indices(array, selection.indices()); |
| 133 | + let validity = array |
| 134 | + .validity_mask() |
| 135 | + .filter(&Mask::Values(selection.clone())) |
| 136 | + .into_mut(); |
| 137 | + |
| 138 | + debug_assert_eq!( |
| 139 | + values.len(), |
| 140 | + validity.len(), |
| 141 | + "`filter_with_indices` was somehow incorrect" |
| 142 | + ); |
| 143 | + |
| 144 | + let mut pvector = unsafe { PVectorMut::new_unchecked(values, validity) }; |
| 145 | + |
| 146 | + // TODO(connor): We want a `PatchesArray` or patching compute functions instead of this. |
| 147 | + let patches = array |
| 148 | + .patches() |
| 149 | + .map(|patches| patches.filter(&Mask::Values(selection.clone()))) |
| 150 | + .transpose()? |
| 151 | + .flatten(); |
| 152 | + if let Some(patches) = patches { |
| 153 | + pvector = patches.apply_to_pvector(pvector); |
| 154 | + } |
| 155 | + |
| 156 | + Ok(pvector.freeze()) |
| 157 | +} |
| 158 | + |
/// Unpacks only the values at `indices` from the bit-packed array into a fresh buffer, choosing a
/// per-chunk strategy based on how many selected indices land in each 1024-element FastLanes
/// chunk: full-chunk unpack, bulk unpack-then-gather, or per-element unpack.
fn filter_with_indices<T: NativePType + BitPacking>(
    array: &BitPackedArray,
    indices: &[usize],
) -> BufferMut<T> {
    let offset = array.offset() as usize;
    let bit_width = array.bit_width() as usize;
    // Every selected index yields exactly one output element, so this never reallocates.
    let mut values = BufferMut::with_capacity(indices.len());

    // Re-usable scratch space for decompressing one full FastLanes chunk (1024 values) at a time.
    let mut unpacked = [const { MaybeUninit::<T>::uninit() }; 1024];
    let packed_bytes = array.packed_slice::<T>();

    // Group the indices by the FastLanes chunk they belong to.
    // One chunk = 1024 values of `bit_width` bits = `128 * bit_width` bytes, expressed here as a
    // count of `T`-sized words.
    let chunk_size = 128 * bit_width / size_of::<T>();

    chunked_indices(indices, offset, |chunk_idx, indices_within_chunk| {
        let packed = &packed_bytes[chunk_idx * chunk_size..][..chunk_size];

        if indices_within_chunk.len() == 1024 {
            // Every element of this chunk is selected: unpack the entire chunk straight into the
            // output buffer.
            // SAFETY: `with_capacity(indices.len())` reserved room for every selected index, of
            // which at least 1024 are still unwritten here, so `set_len` stays within capacity;
            // `unchecked_unpack` then initializes all 1024 elements of the destination slice,
            // and `packed` is exactly one chunk long.
            unsafe {
                let values_len = values.len();
                values.set_len(values_len + 1024);
                BitPacking::unchecked_unpack(
                    bit_width,
                    packed,
                    &mut values.as_mut_slice()[values_len..],
                );
            }
        } else if indices_within_chunk.len() > UNPACK_CHUNK_THRESHOLD {
            // Enough hits that bulk-unpacking the chunk beats per-element unpacking: unpack into
            // the scratch buffer, then gather only the selected values below.
            // SAFETY(review): this creates a `&mut [T]` over uninitialized `MaybeUninit<T>`
            // storage before `unchecked_unpack` writes it — common for plain integer buffers,
            // but strictly stricter than the `MaybeUninit` contract; worth confirming under Miri.
            unsafe {
                let dst: &mut [MaybeUninit<T>] = &mut unpacked;
                let dst: &mut [T] = std::mem::transmute(dst);
                BitPacking::unchecked_unpack(bit_width, packed, dst);
            }
            values.extend_trusted(
                indices_within_chunk
                    .iter()
                    // SAFETY: indices within a chunk are < 1024 (the full-chunk case above relies
                    // on the same bound), and the unpack above initialized all 1024 elements.
                    .map(|&idx| unsafe { unpacked.get_unchecked(idx).assume_init() }),
            );
        } else {
            // Otherwise, unpack each element individually.
            values.extend_trusted(indices_within_chunk.iter().map(|&idx| unsafe {
                // SAFETY: `idx` is chunk-relative (< 1024) and `packed` is a full chunk.
                BitPacking::unchecked_unpack_single(bit_width, packed, idx)
            }));
        }
    });

    values
}