Merged
52 commits
746bf31
Add SQ8-to-SQ8 distance functions and optimizations
dor-forer Dec 28, 2025
8697a3e
Add SQ8-to-SQ8 benchmark tests and update related scripts
dor-forer Dec 28, 2025
e0ce268
Format
dor-forer Dec 28, 2025
ab6b077
Organizing
dor-forer Dec 28, 2025
931e339
Add full sq8 benchmarks
dor-forer Dec 28, 2025
a56474d
Optimize the sq8 sq8
dor-forer Dec 28, 2025
a25f45c
Optimize SQ8 distance functions for NEON by reducing operations and i…
dor-forer Dec 28, 2025
0ad941e
format
dor-forer Dec 28, 2025
68cd068
Add NEON DOTPROD-optimized distance functions for SQ8-to-SQ8 calculat…
dor-forer Dec 28, 2025
0b4b568
PR
dor-forer Dec 28, 2025
d0fd2e4
Remove NEON DOTPROD-optimized distance functions for INT8, UINT8, and…
dor-forer Dec 28, 2025
9de6163
Fix vector layout documentation by removing inv_norm from comments in…
dor-forer Dec 28, 2025
63a46a1
Remove 'constexpr' from ones vector declaration in NEON inner product…
dor-forer Dec 28, 2025
525f8da
Refactor distance functions to remove inv_norm parameter and update d…
dor-forer Dec 29, 2025
13a477b
Update SQ8 Cosine test to normalize both input vectors and adjust dis…
dor-forer Dec 29, 2025
c18000e
Rename 'compressed' to 'quantized' in SQ8 functions for clarity and c…
dor-forer Dec 29, 2025
bbf810e
Implement SQ8-to-SQ8 distance functions with precomputed sum and norm…
dor-forer Dec 29, 2025
dbbb7d9
Add edge case tests for SQ8-to-SQ8 precomputed cosine distance functions
dor-forer Dec 29, 2025
36ab068
Refactor SQ8 test cases to use CreateSQ8QuantizedVector for vector po…
dor-forer Dec 29, 2025
00617d7
Implement SQ8-to-SQ8 precomputed distance functions using ARM NEON, S…
dor-forer Dec 29, 2025
4331d91
Implement SQ8-to-SQ8 precomputed inner product and cosine functions; …
dor-forer Dec 29, 2025
2e7b30d
Refactor SQ8 distance functions and remove precomputed variants
dor-forer Dec 30, 2025
a111e36
Refactor SQ8 distance functions and tests for improved clarity and co…
dor-forer Dec 30, 2025
d510b8a
Refactor SQ8 benchmarks by removing precomputed variants and updating…
dor-forer Dec 30, 2025
ee26740
format
dor-forer Dec 30, 2025
afe1a4f
Remove serialization benchmark script for HNSW disk serialization
dor-forer Dec 30, 2025
a31f95c
Refactor SQ8 distance functions and tests to remove precomputed norm …
dor-forer Dec 31, 2025
f12ecf4
format
dor-forer Dec 31, 2025
0e36030
Merge branch 'main' of https://github.com/RedisAI/VectorSimilarity in…
dor-forer Dec 31, 2025
fdc16c6
Refactor SQ8 distance tests to use compressed vectors and improve nor…
dor-forer Dec 31, 2025
e5f519c
Update vector layout documentation to reflect removal of sum of squar…
dor-forer Dec 31, 2025
db1e671
Refactor SQ8 distance functions to remove norm computation
dor-forer Jan 1, 2026
d5b8587
Update SQ8-to-SQ8 distance function comment to remove norm reference
dor-forer Jan 1, 2026
91f48df
Refactor cosine similarity functions to remove unnecessary subtractio…
dor-forer Jan 1, 2026
b660111
Refactor cosine similarity functions to use specific SIMD implementat…
dor-forer Jan 1, 2026
9166cac
Refactor benchmark setup to allocate additional space for sum and sum…
dor-forer Jan 4, 2026
f28f4e7
Add CPU feature checks to disable optimizations for AArch64 in SQ8 di…
dor-forer Jan 4, 2026
e50dc45
Add CPU feature checks to disable optimizations for AArch64 in SQ8 di…
dor-forer Jan 4, 2026
6bbbc38
Fix formatting issues in SQ8 inner product function and clean up cond…
dor-forer Jan 4, 2026
66a5f88
Enhance SQ8 Inner Product Implementations with Optimized Dot Product …
dor-forer Jan 4, 2026
d7972e9
Fix header guard duplication and update test assertion for floating-p…
dor-forer Jan 4, 2026
a8075bf
Add missing pragma once directive in NEON header files
dor-forer Jan 4, 2026
cddc497
Refactor SQ8 distance functions for improved performance and clarity
dor-forer Jan 4, 2026
4f0fec7
Update SQ8 vector population functions to include metadata and adjust…
dor-forer Jan 4, 2026
8ab4192
Refactor SQ8 inner product functions for improved clarity and perform…
dor-forer Jan 4, 2026
8c59cb2
Rename inner product implementation functions for AVX2 and AVX512 for…
dor-forer Jan 4, 2026
a4ff5d0
Refactor SQ8 cosine function to utilize inner product function for im…
dor-forer Jan 4, 2026
c22158f
Remove redundant inner product edge case tests for SQ8 distance funct…
dor-forer Jan 4, 2026
4c19d9e
Add SVE2 support to SQ8-to-SQ8 Inner Product distance function
dor-forer Jan 4, 2026
5c22af8
Remove SVE2 and other optimizations from SQ8 cosine function test for…
dor-forer Jan 4, 2026
9e50d7c
Update NEON benchmarks to use a vector size of 64 for SQ8-to-SQ8 func…
dor-forer Jan 4, 2026
2e57cf2
Increase allocated space for cosine calculations in SQ8 benchmark setup
dor-forer Jan 4, 2026
72 changes: 65 additions & 7 deletions src/VecSim/spaces/IP/IP.cpp
@@ -15,25 +15,25 @@ using bfloat16 = vecsim_types::bfloat16;
using float16 = vecsim_types::float16;

float FLOAT_INTEGER_InnerProduct(const float *pVect1v, const uint8_t *pVect2v, size_t dimension,
float min_val, float delta, float inv_norm) {
float min_val, float delta) {
float res = 0;
for (size_t i = 0; i < dimension; i++) {
float dequantized_V2 = (pVect2v[i] * delta + min_val);
res += pVect1v[i] * dequantized_V2;
}
return res * inv_norm;
return res;
}

float SQ8_InnerProduct(const void *pVect1v, const void *pVect2v, size_t dimension) {
const auto *pVect1 = static_cast<const float *>(pVect1v);
const auto *pVect2 = static_cast<const uint8_t *>(pVect2v);
// pVect2 is a vector of uint8_t, so we need to de-quantize it and then multiply it.
// It is structured as [quantized values (uint8_t * dim)][min_val (float)][delta
// (float)][inv_norm (float)] The last two values are used to dequantize the vector.
// (float)] The last two values are used to dequantize the vector.
const float min_val = *reinterpret_cast<const float *>(pVect2 + dimension);
const float delta = *reinterpret_cast<const float *>(pVect2 + dimension + sizeof(float));
// Compute inner product with dequantization
const float res = FLOAT_INTEGER_InnerProduct(pVect1, pVect2, dimension, min_val, delta, 1.0f);
const float res = FLOAT_INTEGER_InnerProduct(pVect1, pVect2, dimension, min_val, delta);
return 1.0f - res;
}

@@ -44,10 +44,68 @@ float SQ8_Cosine(const void *pVect1v, const void *pVect2v, size_t dimension) {
// Get quantization parameters
const float min_val = *reinterpret_cast<const float *>(pVect2 + dimension);
const float delta = *reinterpret_cast<const float *>(pVect2 + dimension + sizeof(float));
const float inv_norm = *reinterpret_cast<const float *>(pVect2 + dimension + 2 * sizeof(float));
// Compute inner product with dequantization
const float res =
FLOAT_INTEGER_InnerProduct(pVect1, pVect2, dimension, min_val, delta, inv_norm);
const float res = FLOAT_INTEGER_InnerProduct(pVect1, pVect2, dimension, min_val, delta);
return 1.0f - res;
}

// SQ8-to-SQ8: Both vectors are uint8 quantized with precomputed sum
// Vector layout: [uint8_t values (dim)] [min_val (float)] [delta (float)] [sum (float)]
float SQ8_SQ8_InnerProduct(const void *pVect1v, const void *pVect2v, size_t dimension) {
const auto *pVect1 = static_cast<const uint8_t *>(pVect1v);
const auto *pVect2 = static_cast<const uint8_t *>(pVect2v);

// Compute inner product of quantized values: Σ(q1[i]*q2[i])
float product = 0;
for (size_t i = 0; i < dimension; i++) {
product += pVect1[i] * pVect2[i];
}

// Extract metadata from the end of vectors (likely already prefetched)
// Get quantization parameters from pVect1
const float min_val1 = *reinterpret_cast<const float *>(pVect1 + dimension);
const float delta1 = *reinterpret_cast<const float *>(pVect1 + dimension + sizeof(float));
const float sum1 = *reinterpret_cast<const float *>(pVect1 + dimension + 2 * sizeof(float));

// Get quantization parameters from pVect2
const float min_val2 = *reinterpret_cast<const float *>(pVect2 + dimension);
const float delta2 = *reinterpret_cast<const float *>(pVect2 + dimension + sizeof(float));
const float sum2 = *reinterpret_cast<const float *>(pVect2 + dimension + 2 * sizeof(float));

// Apply the algebraic formula using precomputed sums:
// IP = min1*sum2 + min2*sum1 + delta1*delta2*Σ(q1[i]*q2[i]) - dim*min1*min2
float res = min_val1 * sum2 + min_val2 * sum1 -
static_cast<float>(dimension) * min_val1 * min_val2 + delta1 * delta2 * product;
return 1.0f - res;
}

// SQ8-to-SQ8: Both vectors are uint8 quantized and normalized with precomputed sum
// Vector layout: [uint8_t values (dim)] [min_val (float)] [delta (float)] [sum (float)]
float SQ8_SQ8_Cosine(const void *pVect1v, const void *pVect2v, size_t dimension) {
const auto *pVect1 = static_cast<const uint8_t *>(pVect1v);
const auto *pVect2 = static_cast<const uint8_t *>(pVect2v);

// Compute inner product of quantized values: Σ(q1[i]*q2[i])
float product = 0;
for (size_t i = 0; i < dimension; i++) {
product += pVect1[i] * pVect2[i];
}

// Extract metadata from the end of vectors
// Get quantization parameters from pVect1
const float min_val1 = *reinterpret_cast<const float *>(pVect1 + dimension);
const float delta1 = *reinterpret_cast<const float *>(pVect1 + dimension + sizeof(float));
const float sum1 = *reinterpret_cast<const float *>(pVect1 + dimension + 2 * sizeof(float));

// Get quantization parameters from pVect2
const float min_val2 = *reinterpret_cast<const float *>(pVect2 + dimension);
const float delta2 = *reinterpret_cast<const float *>(pVect2 + dimension + sizeof(float));
const float sum2 = *reinterpret_cast<const float *>(pVect2 + dimension + 2 * sizeof(float));

// Apply the algebraic formula using precomputed sums:
// IP = min1*sum2 + min2*sum1 + delta1*delta2*Σ(q1[i]*q2[i]) - dim*min1*min2
float res = min_val1 * sum2 + min_val2 * sum1 -
static_cast<float>(dimension) * min_val1 * min_val2 + delta1 * delta2 * product;
return 1.0f - res;
}
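
For reference, the identity used above can be checked against direct dequantization. A minimal standalone sketch (an editorial example, not code from this PR), where the precomputed sum is the sum of dequantized values Σ(q[i]*delta + min):

#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>

int main() {
    const size_t dim = 4;
    const uint8_t q1[] = {0, 85, 170, 255};
    const uint8_t q2[] = {255, 128, 64, 0};
    const float m1 = -1.0f, d1 = 2.0f / 255.0f; // vector 1 spans [-1, 1]
    const float m2 = -0.5f, d2 = 1.0f / 255.0f; // vector 2 spans [-0.5, 0.5]

    float direct = 0, product = 0, sum1 = 0, sum2 = 0;
    for (size_t i = 0; i < dim; i++) {
        direct += (q1[i] * d1 + m1) * (q2[i] * d2 + m2); // dequantize, then multiply
        product += static_cast<float>(q1[i]) * q2[i];    // quantized dot product
        sum1 += q1[i] * d1 + m1;                         // precomputed sum of vector 1
        sum2 += q2[i] * d2 + m2;                         // precomputed sum of vector 2
    }
    // Expanding Σ(q1*d1 + m1)(q2*d2 + m2) term by term gives exactly the formula above.
    const float algebraic =
        m1 * sum2 + m2 * sum1 - static_cast<float>(dim) * m1 * m2 + d1 * d2 * product;
    assert(std::fabs(direct - algebraic) < 1e-3f);
    return 0;
}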

8 changes: 8 additions & 0 deletions src/VecSim/spaces/IP/IP.h
@@ -16,6 +16,14 @@ float SQ8_InnerProduct(const void *pVect1v, const void *pVect2v, size_t dimensio
// pVect1v vector of type fp32 and pVect2v vector of type uint8
float SQ8_Cosine(const void *pVect1v, const void *pVect2v, size_t dimension);

// SQ8-to-SQ8: Both vectors are uint8 quantized with precomputed sum
// Vector layout: [uint8_t values (dim)] [min_val (float)] [delta (float)] [sum (float)]
float SQ8_SQ8_InnerProduct(const void *pVect1v, const void *pVect2v, size_t dimension);

// SQ8-to-SQ8: Both vectors are uint8 quantized and normalized with precomputed sum
// Vector layout: [uint8_t values (dim)] [min_val (float)] [delta (float)] [sum (float)]
float SQ8_SQ8_Cosine(const void *pVect1v, const void *pVect2v, size_t dimension);
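
A caller-side sketch of how such a blob could be laid out (a hypothetical helper, assuming the usual SQ8 scheme delta = (max - min) / 255; `PackSQ8` is not an API of this repo):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Packs [uint8_t values (dim)][min_val][delta][sum], where sum is the sum of
// the dequantized values, as expected by SQ8_SQ8_InnerProduct above.
inline std::vector<uint8_t> PackSQ8(const float *v, size_t dim) {
    const float lo = *std::min_element(v, v + dim);
    const float hi = *std::max_element(v, v + dim);
    const float delta = (hi > lo) ? (hi - lo) / 255.0f : 1.0f;
    std::vector<uint8_t> blob(dim + 3 * sizeof(float));
    float sum = 0;
    for (size_t i = 0; i < dim; i++) {
        blob[i] = static_cast<uint8_t>(std::lround((v[i] - lo) / delta));
        sum += blob[i] * delta + lo; // accumulate the dequantized values
    }
    std::memcpy(blob.data() + dim, &lo, sizeof(float));
    std::memcpy(blob.data() + dim + sizeof(float), &delta, sizeof(float));
    std::memcpy(blob.data() + dim + 2 * sizeof(float), &sum, sizeof(float));
    return blob;
}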

float FP32_InnerProduct(const void *pVect1, const void *pVect2, size_t dimension);

double FP64_InnerProduct(const void *pVect1, const void *pVect2, size_t dimension);
12 changes: 2 additions & 10 deletions src/VecSim/spaces/IP/IP_AVX2_FMA_SQ8.h
@@ -100,14 +100,6 @@ float SQ8_InnerProductSIMD16_AVX2_FMA(const void *pVect1v, const void *pVect2v,

template <unsigned char residual> // 0..15
float SQ8_CosineSIMD16_AVX2_FMA(const void *pVect1v, const void *pVect2v, size_t dimension) {
// Get dequantization parameters from the end of quantized vector
const uint8_t *pVect2 = static_cast<const uint8_t *>(pVect2v);
const float inv_norm = *reinterpret_cast<const float *>(pVect2 + dimension + 2 * sizeof(float));

// Calculate inner product using common implementation with normalization
float ip = SQ8_InnerProductImp_FMA<residual>(pVect1v, pVect2v, dimension);

// For cosine, we need to account for the vector norms
// The inv_norm parameter is stored after min_val and delta in the quantized vector
return 1.0f - ip * inv_norm;
// Assume vectors are normalized.
return SQ8_InnerProductSIMD16_AVX2_FMA<residual>(pVect1v, pVect2v, dimension);
}
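
With the inv_norm factor removed, the cosine kernels rely on the stored vectors having been normalized before quantization. A sketch of that caller-side step (an assumption drawn from the "Assume vectors are normalized" comments; `NormalizeInPlace` is hypothetical, not code from this PR):

#include <cmath>
#include <cstddef>

// Normalize in place so that cosine distance reduces to 1 - inner product.
inline void NormalizeInPlace(float *v, size_t dim) {
    float norm_sq = 0;
    for (size_t i = 0; i < dim; i++)
        norm_sq += v[i] * v[i];
    if (norm_sq > 0) {
        const float inv_norm = 1.0f / std::sqrt(norm_sq);
        for (size_t i = 0; i < dim; i++)
            v[i] *= inv_norm;
    }
}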
16 changes: 4 additions & 12 deletions src/VecSim/spaces/IP/IP_AVX2_SQ8.h
@@ -33,7 +33,7 @@ static inline void InnerProductStepSQ8(const float *&pVect1, const uint8_t *&pVe
}

template <unsigned char residual> // 0..15
float SQ8_InnerProductImp(const void *pVect1v, const void *pVect2v, size_t dimension) {
float SQ8_InnerProductImp_AVX2(const void *pVect1v, const void *pVect2v, size_t dimension) {
const float *pVect1 = static_cast<const float *>(pVect1v);
// pVect2 is a quantized uint8_t vector
const uint8_t *pVect2 = static_cast<const uint8_t *>(pVect2v);
@@ -89,19 +89,11 @@ float SQ8_InnerProductImp(const void *pVect1v, const void *pVect2v, size_t dimen

template <unsigned char residual> // 0..15
float SQ8_InnerProductSIMD16_AVX2(const void *pVect1v, const void *pVect2v, size_t dimension) {
return 1.0f - SQ8_InnerProductImp<residual>(pVect1v, pVect2v, dimension);
return 1.0f - SQ8_InnerProductImp_AVX2<residual>(pVect1v, pVect2v, dimension);
}

template <unsigned char residual> // 0..15
float SQ8_CosineSIMD16_AVX2(const void *pVect1v, const void *pVect2v, size_t dimension) {
// Get dequantization parameters from the end of quantized vector
const uint8_t *pVect2 = static_cast<const uint8_t *>(pVect2v);
const float inv_norm = *reinterpret_cast<const float *>(pVect2 + dimension + 2 * sizeof(float));

// Calculate inner product using common implementation with normalization
float ip = SQ8_InnerProductImp<residual>(pVect1v, pVect2v, dimension);

// For cosine, we need to account for the vector norms
// The inv_norm parameter is stored after min_val and delta in the quantized vector
return 1.0f - ip * inv_norm;
// Assume vectors are normalized.
return SQ8_InnerProductSIMD16_AVX2<residual>(pVect1v, pVect2v, dimension);
}
145 changes: 145 additions & 0 deletions src/VecSim/spaces/IP/IP_AVX512F_BW_VL_VNNI_SQ8.h
@@ -0,0 +1,145 @@
/*
* Copyright (c) 2006-Present, Redis Ltd.
* All rights reserved.
*
* Licensed under your choice of the Redis Source Available License 2.0
* (RSALv2); or (b) the Server Side Public License v1 (SSPLv1); or (c) the
* GNU Affero General Public License v3 (AGPLv3).
*/
#pragma once
#include "VecSim/spaces/space_includes.h"
#include <immintrin.h>

/**
* SQ8 distance functions (float32 query vs uint8 stored) using AVX512.
*
* Uses algebraic optimization to reduce operations per element:
*
* IP = Σ query[i] * (val[i] * δ + min)
* = δ * Σ(query[i] * val[i]) + min * Σ(query[i])
*
* This saves one FMA per 16 elements by separating:
* - dot_sum: accumulates query[i] * val[i]
* - query_sum: accumulates query[i]
* Then combines at the end: result = δ * dot_sum + min * query_sum
*
* Also uses multiple accumulators for better instruction-level parallelism.
*
* Vector layout: [uint8_t values (dim)] [min_val (float)] [delta (float)] [sum (float)]
*/
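
As a scalar reference for what the SIMD loops below accumulate (an editorial sketch, not part of this file):

// Instead of dequantizing (val*δ + min) inside the loop, keep two running
// sums and combine them once at the end: IP = δ*dot_sum + min*query_sum.
static inline float SQ8_IP_ScalarReference(const float *query, const uint8_t *vals,
                                           size_t dim, float min_val, float delta) {
    float dot_sum = 0;   // Σ query[i] * vals[i]
    float query_sum = 0; // Σ query[i]
    for (size_t i = 0; i < dim; i++) {
        dot_sum += query[i] * vals[i];
        query_sum += query[i];
    }
    return delta * dot_sum + min_val * query_sum;
}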

// Process 16 elements with algebraic optimization
static inline void SQ8_InnerProductStep(const float *pVec1, const uint8_t *pVec2, __m512 &dot_sum,
__m512 &query_sum) {
// Load 16 float elements from query
__m512 v1 = _mm512_loadu_ps(pVec1);

// Load 16 uint8 elements and convert to float
__m128i v2_128 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(pVec2));
__m512i v2_512 = _mm512_cvtepu8_epi32(v2_128);
__m512 v2_f = _mm512_cvtepi32_ps(v2_512);

// Accumulate query * val (without dequantization)
dot_sum = _mm512_fmadd_ps(v1, v2_f, dot_sum);

// Accumulate query sum
query_sum = _mm512_add_ps(query_sum, v1);
}

// Common implementation for both inner product and cosine similarity
template <unsigned char residual> // 0..15
float SQ8_InnerProductImp_AVX512(const void *pVec1v, const void *pVec2v, size_t dimension) {
const float *pVec1 = static_cast<const float *>(pVec1v);
const uint8_t *pVec2 = static_cast<const uint8_t *>(pVec2v);

// Get dequantization parameters from the end of pVec2
const float min_val = *reinterpret_cast<const float *>(pVec2 + dimension);
const float delta = *reinterpret_cast<const float *>(pVec2 + dimension + sizeof(float));

// Multiple accumulators for instruction-level parallelism
__m512 dot_sum0 = _mm512_setzero_ps();
__m512 dot_sum1 = _mm512_setzero_ps();
__m512 dot_sum2 = _mm512_setzero_ps();
__m512 dot_sum3 = _mm512_setzero_ps();
__m512 query_sum0 = _mm512_setzero_ps();
__m512 query_sum1 = _mm512_setzero_ps();
__m512 query_sum2 = _mm512_setzero_ps();
__m512 query_sum3 = _mm512_setzero_ps();

size_t offset = 0;

// Deal with remainder first
if constexpr (residual > 0) {
// Handle less than 16 elements
__mmask16 mask = (1U << residual) - 1;

// Load masked float elements from query
__m512 v1 = _mm512_maskz_loadu_ps(mask, pVec1);

// Load uint8 elements and convert to float
__m128i v2_128 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(pVec2));
__m512i v2_512 = _mm512_cvtepu8_epi32(v2_128);
__m512 v2_f = _mm512_cvtepi32_ps(v2_512);

// Masked accumulation (mask already zeroed unused elements in v1)
dot_sum0 = _mm512_mul_ps(v1, v2_f);
query_sum0 = v1;

offset = residual;
}

// Calculate number of full 64-element chunks (4 x 16)
size_t num_chunks = (dimension - residual) / 64;

// Process 4 chunks at a time for maximum ILP
for (size_t i = 0; i < num_chunks; i++) {
SQ8_InnerProductStep(pVec1 + offset, pVec2 + offset, dot_sum0, query_sum0);
SQ8_InnerProductStep(pVec1 + offset + 16, pVec2 + offset + 16, dot_sum1, query_sum1);
SQ8_InnerProductStep(pVec1 + offset + 32, pVec2 + offset + 32, dot_sum2, query_sum2);
SQ8_InnerProductStep(pVec1 + offset + 48, pVec2 + offset + 48, dot_sum3, query_sum3);
offset += 64;
}

// Handle remaining 16-element chunks (0-3 remaining)
size_t remaining = (dimension - residual) % 64;
if (remaining >= 16) {
SQ8_InnerProductStep(pVec1 + offset, pVec2 + offset, dot_sum0, query_sum0);
offset += 16;
remaining -= 16;
}
if (remaining >= 16) {
SQ8_InnerProductStep(pVec1 + offset, pVec2 + offset, dot_sum1, query_sum1);
offset += 16;
remaining -= 16;
}
if (remaining >= 16) {
SQ8_InnerProductStep(pVec1 + offset, pVec2 + offset, dot_sum2, query_sum2);
}

// Combine accumulators
__m512 dot_total =
_mm512_add_ps(_mm512_add_ps(dot_sum0, dot_sum1), _mm512_add_ps(dot_sum2, dot_sum3));
__m512 query_total =
_mm512_add_ps(_mm512_add_ps(query_sum0, query_sum1), _mm512_add_ps(query_sum2, query_sum3));

// Reduce to scalar
float dot_product = _mm512_reduce_add_ps(dot_total);
float query_sum = _mm512_reduce_add_ps(query_total);

// Apply algebraic formula: IP = δ * Σ(query*val) + min * Σ(query)
return delta * dot_product + min_val * query_sum;
}

template <unsigned char residual> // 0..15
float SQ8_InnerProductSIMD16_AVX512F_BW_VL_VNNI(const void *pVec1v, const void *pVec2v,
size_t dimension) {
// The inner product similarity is 1 - ip
return 1.0f - SQ8_InnerProductImp_AVX512<residual>(pVec1v, pVec2v, dimension);
}

template <unsigned char residual> // 0..15
float SQ8_CosineSIMD16_AVX512F_BW_VL_VNNI(const void *pVec1v, const void *pVec2v,
size_t dimension) {
// Assume vectors are normalized.
return SQ8_InnerProductSIMD16_AVX512F_BW_VL_VNNI<residual>(pVec1v, pVec2v, dimension);
}