// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright the Vortex contributors

use std::mem::MaybeUninit;

use fastlanes::BitPacking;
use vortex_array::ExecutionCtx;
use vortex_array::IntoArray;
use vortex_array::VectorExecutor;
use vortex_array::arrays::FilterArray;
use vortex_array::arrays::FilterVTable;
use vortex_array::kernel::ExecuteParentKernel;
use vortex_array::matchers::Exact;
use vortex_array::patches::patch_pvector;
use vortex_buffer::Buffer;
use vortex_buffer::BufferMut;
use vortex_compute::filter::Filter;
use vortex_dtype::NativePType;
use vortex_dtype::PType;
use vortex_dtype::UnsignedPType;
use vortex_dtype::match_each_integer_ptype;
use vortex_error::VortexExpect;
use vortex_error::VortexResult;
use vortex_mask::Mask;
use vortex_vector::Vector;
use vortex_vector::VectorMut;
use vortex_vector::VectorMutOps;
use vortex_vector::primitive::PVector;
use vortex_vector::primitive::PrimitiveVector;

use crate::BitPackedArray;
use crate::BitPackedVTable;
use crate::bitpacking::kernels::UNPACK_CHUNK_THRESHOLD;
use crate::bitpacking::kernels::chunked_indices;

/// The mask density above which it is faster to fully unpack the entire [`BitPackedArray`] and
/// then filter the result than to unpack only the selected bit-packed values into the output
/// buffer.
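///
/// A sketch of the intended comparison, mirroring the kernel below (marked `ignore` since it is
/// illustrative rather than a runnable doctest):
///
/// ```ignore
/// if selection.density() > unpack_then_filter_threshold::<u32>() {
///     // Dense mask: cheaper to fully unpack and then filter.
/// }
/// ```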
pub const fn unpack_then_filter_threshold<T>() -> f64 {
    // TODO(connor): Where did these numbers come from? Add a public link after validating them.
    // These numbers probably don't work for in-place filtering either.
    //
    // These numbers are derived from a GCP c2-standard-4 with a "Cascade Lake" CPU; types wider
    // than 8 bytes may have a higher threshold.
    match size_of::<T>() {
        1 => 0.03,
        2 => 0.03,
        4 => 0.075,
        _ => 0.09,
    }
}

#[derive(Debug)]
struct BitPackingFilterKernel;

impl ExecuteParentKernel<BitPackedVTable> for BitPackingFilterKernel {
    type Parent = Exact<FilterVTable>;

    fn parent(&self) -> Self::Parent {
        Exact::from(&FilterVTable)
    }

    fn execute_parent(
        &self,
        array: &BitPackedArray,
        parent: &FilterArray,
        _child_idx: usize,
        ctx: &mut ExecutionCtx,
    ) -> VortexResult<Option<Vector>> {
        let selection = parent.filter_mask();

        let true_count = selection.true_count();
        if true_count == 0 {
            // Fast-path for an empty mask.
            return Ok(Some(VectorMut::with_capacity(array.dtype(), 0).freeze()));
        } else if true_count == selection.len() {
            // Fast-path for a full mask.
            return Ok(Some(array.to_array().execute(ctx)?));
        }
        match_each_integer_ptype!(array.ptype(), |I| {
            // If the mask is dense enough, it is faster to decompress the whole array and then
            // filter it than to decompress the selected values one by one, so defer to the
            // generic unpack-then-filter path by returning `None`.
            if selection.density() > unpack_then_filter_threshold::<I>() {
                return Ok(None);
            }
        });

        let primitive_vector: PrimitiveVector = match array.ptype() {
            PType::U8 => filter_primitive::<u8>(array, selection)?.into(),
            PType::U16 => filter_primitive::<u16>(array, selection)?.into(),
            PType::U32 => filter_primitive::<u32>(array, selection)?.into(),
            PType::U64 => filter_primitive::<u64>(array, selection)?.into(),

            // The fastlanes crate only supports unsigned integers, and every packed value is
            // known to be non-negative, so we can safely reinterpret signed types as their
            // unsigned twins and cast back afterwards.
            PType::I8 => {
                let pvector = filter_primitive::<u8>(array, selection)?;
                pvector.cast_into::<i8>().into()
            }
            PType::I16 => {
                let pvector = filter_primitive::<u16>(array, selection)?;
                pvector.cast_into::<i16>().into()
            }
            PType::I32 => {
                let pvector = filter_primitive::<u32>(array, selection)?;
                pvector.cast_into::<i32>().into()
            }
            PType::I64 => {
                let pvector = filter_primitive::<u64>(array, selection)?;
                pvector.cast_into::<i64>().into()
            }
            other => {
                unreachable!(
                    "unsupported ptype {other} for bit-packing; non-integer ptypes are rejected by the integer match above"
                )
            }
        };

        Ok(Some(primitive_vector.into()))
    }
}

/// Specialized filter kernel for primitive bit-packed arrays.
///
/// Because the FastLanes bit-packing kernels are only implemented for unsigned types, `U` must be
/// the unsigned variant of the array's ptype, regardless of the target bit width.
/// For example, if the array is bit-packed `i16`, this function should be called with `U = u16`.
///
/// This function fully decompresses the array for all but the most selective masks because the
/// FastLanes decompression is so fast and the bookkeeping necessary to decompress individual
/// elements is relatively slow.
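///
/// A sketch of the promotion for a signed array (`array` and `mask` are stand-in names):
///
/// ```ignore
/// // i16-packed array: decode through the unsigned twin, then cast back.
/// let unsigned: PVector<u16> = filter_primitive::<u16>(&array, &mask)?;
/// let signed = unsigned.cast_into::<i16>();
/// ```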
fn filter_primitive<U: UnsignedPType + BitPacking>(
    array: &BitPackedArray,
    selection: &Mask,
) -> VortexResult<PVector<U>> {
    let values = filter_with_indices(
        array,
        selection
            .values()
            .vortex_expect("all-true and all-false masks are handled by the caller's fast paths")
            .indices(),
    );
    let validity = array.validity_mask().filter(selection);

    debug_assert_eq!(
        values.len(),
        validity.len(),
        "`filter_with_indices` returned a different number of values than the filtered validity mask"
    );

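    // SAFETY: `values` and the filtered `validity` mask cover exactly the selected positions, so
    // their lengths agree (checked by the debug assertion above).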
    let mut pvector = unsafe { PVector::new_unchecked(values, validity) };

    // TODO(connor): We want a `PatchesArray` or patching compute functions instead of this.
    let patches = array
        .patches()
        .map(|patches| patches.filter(selection))
        .transpose()?
        .flatten();
    if let Some(patches) = patches {
        pvector = patch_pvector(pvector, &patches);
    }

    Ok(pvector)
}

fn filter_with_indices<T: NativePType + BitPacking>(
    array: &BitPackedArray,
    indices: &[usize],
) -> Buffer<T> {
    let offset = array.offset() as usize;
    let bit_width = array.bit_width() as usize;
    let mut values = BufferMut::with_capacity(indices.len());

    // Reusable scratch space for unpacking one full FastLanes chunk of 1024 values.
    let mut unpacked = [const { MaybeUninit::<T>::uninit() }; 1024];
    let packed_slice = array.packed_slice::<T>();

    // Each FastLanes chunk packs 1024 values into `1024 * bit_width` bits, which is
    // `128 * bit_width / size_of::<T>()` elements of `T`.
    let chunk_size = 128 * bit_width / size_of::<T>();
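    // For example, `u32` values packed at bit width 3 occupy 1024 * 3 = 3072 bits per chunk,
    // i.e. 128 * 3 / 4 = 96 `u32` elements.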

    // Group the indices by the FastLanes chunk they belong to.
    chunked_indices(indices, offset, |chunk_idx, indices_within_chunk| {
        let packed = &packed_slice[chunk_idx * chunk_size..][..chunk_size];

        if indices_within_chunk.len() == 1024 {
            // Unpack the entire chunk directly into the output buffer.
            unsafe {
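                // SAFETY: `values` was allocated with capacity for every selected index, so
                // extending its length by this chunk's 1024 indices stays within capacity, and
                // `unchecked_unpack` initializes all of the newly exposed elements.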
                let values_len = values.len();
                values.set_len(values_len + 1024);
                BitPacking::unchecked_unpack(
                    bit_width,
                    packed,
                    &mut values.as_mut_slice()[values_len..],
                );
            }
        } else if indices_within_chunk.len() > UNPACK_CHUNK_THRESHOLD {
            // Unpack into the temporary chunk and then copy over the selected values.
            unsafe {
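                // SAFETY: `MaybeUninit<T>` has the same layout as `T`, and `unchecked_unpack`
                // writes all 1024 elements before any of them are read below.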
                let dst: &mut [MaybeUninit<T>] = &mut unpacked;
                let dst: &mut [T] = std::mem::transmute(dst);
                BitPacking::unchecked_unpack(bit_width, packed, dst);
            }
            values.extend_trusted(
                indices_within_chunk
                    .iter()
                    .map(|&idx| unsafe { unpacked.get_unchecked(idx).assume_init() }),
            );
        } else {
            // Otherwise, unpack each element individually.
            values.extend_trusted(indices_within_chunk.iter().map(|&idx| unsafe {
                BitPacking::unchecked_unpack_single(bit_width, packed, idx)
            }));
        }
    });

    values.freeze()
}
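
#[cfg(test)]
mod tests {
    use super::*;

    // A minimal sanity check of the threshold table above. The rationale is inferred from the
    // table itself (the numbers are machine-specific; see the TODO): wider elements are
    // relatively more expensive to fully unpack, so the break-even density should not decrease
    // with element size.
    #[test]
    fn threshold_is_monotone_in_element_width() {
        assert!(unpack_then_filter_threshold::<u8>() <= unpack_then_filter_threshold::<u16>());
        assert!(unpack_then_filter_threshold::<u16>() <= unpack_then_filter_threshold::<u32>());
        assert!(unpack_then_filter_threshold::<u32>() <= unpack_then_filter_threshold::<u64>());
    }
}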