@@ -78,11 +78,6 @@ SZ_PUBLIC sz_u64_t sz_checksum_serial(sz_cptr_t text, sz_size_t length);
 /** @copydoc sz_hash */
 SZ_PUBLIC sz_u64_t sz_hash_serial(sz_cptr_t text, sz_size_t length);
 
-/** @copydoc sz_hashes */
-SZ_PUBLIC void sz_hashes_serial( //
-    sz_cptr_t text, sz_size_t length, sz_size_t window_length, sz_size_t window_step, //
-    sz_hash_callback_t callback, void *callback_handle);
-
 /** @copydoc sz_generate */
 SZ_PUBLIC void sz_generate_serial( //
     sz_cptr_t alphabet, sz_size_t cardinality, sz_ptr_t text, sz_size_t length, sz_random_generator_t generate,
@@ -261,7 +256,7 @@ SZ_PUBLIC sz_u64_t sz_checksum_haswell(sz_cptr_t text, sz_size_t length) {
         text_vec.ymm = _mm256_lddqu_si256((__m256i const *)text);
         sums_vec.ymm = _mm256_add_epi64(sums_vec.ymm, _mm256_sad_epu8(text_vec.ymm, _mm256_setzero_si256()));
     }
-    // Accumulating 256 bits is harders, as we need to extract the 128-bit sums first.
+    // Accumulating 256 bits is harder, as we need to extract the 128-bit sums first.
     __m128i low_xmm = _mm256_castsi256_si128(sums_vec.ymm);
     __m128i high_xmm = _mm256_extracti128_si256(sums_vec.ymm, 1);
     __m128i sums_xmm = _mm_add_epi64(low_xmm, high_xmm);
@@ -291,7 +286,7 @@ SZ_PUBLIC sz_u64_t sz_checksum_haswell(sz_cptr_t text, sz_size_t length) {
             sums_vec.ymm = _mm256_add_epi64(sums_vec.ymm, _mm256_sad_epu8(text_vec.ymm, _mm256_setzero_si256()));
         }
     }
-    // When the biffer is huge, we can traverse it in 2 directions.
+    // When the buffer is huge, we can traverse it in 2 directions.
     else {
         sz_u256_vec_t text_reversed_vec, sums_reversed_vec;
         sums_reversed_vec.ymm = _mm256_setzero_si256();
@@ -312,7 +307,7 @@ SZ_PUBLIC sz_u64_t sz_checksum_haswell(sz_cptr_t text, sz_size_t length) {
     // Handle the tail
     while (tail_length--) result += *text++;
 
-    // Accumulating 256 bits is harders, as we need to extract the 128-bit sums first.
+    // Accumulating 256 bits is harder, as we need to extract the 128-bit sums first.
     __m128i low_xmm = _mm256_castsi256_si128(sums_vec.ymm);
     __m128i high_xmm = _mm256_extracti128_si256(sums_vec.ymm, 1);
     __m128i sums_xmm = _mm_add_epi64(low_xmm, high_xmm);
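As a side note on the reduction pattern touched by these hunks: summing bytes with AVX2 relies on _mm256_sad_epu8 against a zero vector, which adds each group of 8 bytes into a 64-bit lane, after which the four lanes are folded from 256 bits down to a scalar. The standalone sketch below is not part of the patch; the function name checksum_avx2_sketch is illustrative, and it mirrors the low_xmm/high_xmm/sums_xmm steps shown above.

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper, not part of the library: sums all bytes of `text`. */
static uint64_t checksum_avx2_sketch(unsigned char const *text, size_t length) {
    __m256i sums = _mm256_setzero_si256();
    size_t i = 0;
    for (; i + 32 <= length; i += 32) {
        __m256i chunk = _mm256_lddqu_si256((__m256i const *)(text + i));
        /* SAD against zero adds each group of 8 bytes into a 64-bit lane. */
        sums = _mm256_add_epi64(sums, _mm256_sad_epu8(chunk, _mm256_setzero_si256()));
    }
    /* Fold 256 bits -> 128 bits -> scalar, as in the hunks above. */
    __m128i low_xmm = _mm256_castsi256_si128(sums);
    __m128i high_xmm = _mm256_extracti128_si256(sums, 1);
    __m128i sums_xmm = _mm_add_epi64(low_xmm, high_xmm);
    uint64_t result = (uint64_t)_mm_cvtsi128_si64(sums_xmm) + (uint64_t)_mm_extract_epi64(sums_xmm, 1);
    /* Scalar tail for the last (length % 32) bytes. */
    for (; i < length; ++i) result += text[i];
    return result;
}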