-
Notifications
You must be signed in to change notification settings - Fork 279
Expand file tree
/
Copy pathsequencefile_test.py
More file actions
440 lines (339 loc) · 16.3 KB
/
sequencefile_test.py
File metadata and controls
440 lines (339 loc) · 16.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
# Copyright (c) 2026, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import struct
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_row_counts_equal
from data_gen import *
from marks import *
from pyspark.sql.types import *
from spark_session import with_cpu_session, with_gpu_session
# Reader types exercised by these tests. The sequencefilebinary format
# supports PERFILE and MULTITHREADED readers; COALESCING is not supported.
sequencefile_reader_types = ['PERFILE', 'MULTITHREADED']
def read_sequencefile_df(data_path):
    """Build a session -> DataFrame function that reads *data_path* with the
    sequencefilebinary DataFrame reader."""
    def do_read(spark):
        return spark.read.format("sequencefilebinary").load(data_path)
    return do_read
def write_sequencefile_with_rdd(spark, data_path, payloads):
    """
    Write an uncompressed SequenceFile using Spark's RDD saveAsNewAPIHadoopFile method.

    payloads: list of byte arrays written as the values; the keys are
    incrementing integers packed as 4-byte big-endian. This produces real
    BytesWritable key/value pairs that the sequencefilebinary format can read.
    """
    # Build (key, value) pairs. bytearray is used so PySpark serializes
    # each element as a proper BytesWritable.
    pairs = []
    for seq_num, data in enumerate(payloads):
        pairs.append((bytearray(struct.pack('>I', seq_num)), bytearray(data)))
    # A single-partition RDD keeps all records in one output file. Save via
    # the Hadoop new-API SequenceFile output format with BytesWritable
    # key/value classes.
    spark.sparkContext.parallelize(pairs, 1).saveAsNewAPIHadoopFile(
        data_path,
        "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
        "org.apache.hadoop.io.BytesWritable",
        "org.apache.hadoop.io.BytesWritable"
    )
# ============================================================================
# Basic Read Tests
# ============================================================================
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_basic_read(spark_tmp_path, reader_type):
    """Test basic SequenceFile reading with different reader types."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    # Representative values: raw binary, plain text, and a run of 0xff bytes.
    test_payloads = [b'\x01\x02\x03', b'hello world', b'\xff' * 10]
    # Produce the file on the CPU so the CPU and GPU runs read identical data.
    with_cpu_session(
        lambda spark: write_sequencefile_with_rdd(spark, data_path, test_payloads))
    assert_gpu_and_cpu_are_equal_collect(
        read_sequencefile_df(data_path),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_read_key_only(spark_tmp_path, reader_type):
    """Test reading only the key column (column pruning)."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    with_cpu_session(
        lambda spark: write_sequencefile_with_rdd(
            spark, data_path, [b'value1', b'value2', b'value3']))
    # Selecting just 'key' exercises the reader's column-pruning path.
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: read_sequencefile_df(data_path)(spark).select("key"),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_read_value_only(spark_tmp_path, reader_type):
    """Test reading only the value column (column pruning)."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    with_cpu_session(
        lambda spark: write_sequencefile_with_rdd(
            spark, data_path, [b'value1', b'value2', b'value3']))
    # Selecting just 'value' exercises the reader's column-pruning path.
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: read_sequencefile_df(data_path)(spark).select("value"),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
# ============================================================================
# Empty File Tests
# ============================================================================
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_empty_file(spark_tmp_path, reader_type):
    """Test reading an empty SequenceFile."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    # An empty payload list produces a SequenceFile with a header but no records.
    with_cpu_session(lambda spark: write_sequencefile_with_rdd(spark, data_path, []))
    assert_gpu_and_cpu_are_equal_collect(
        read_sequencefile_df(data_path),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
# ============================================================================
# Multi-file Tests
# ============================================================================
@ignore_order(local=True)
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_multi_file_read(spark_tmp_path, reader_type):
    """Test reading multiple SequenceFiles from a directory."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    # Lay down three separate files under the same directory.
    for file_num in range(3):
        target = data_path + f'/file{file_num}'
        contents = [f'file{file_num}_record{rec}'.encode() for rec in range(5)]
        # Bind loop values as lambda defaults so each closure writes its own file.
        with_cpu_session(lambda spark, p=contents, fp=target:
                         write_sequencefile_with_rdd(spark, fp, p))
    assert_gpu_and_cpu_are_equal_collect(
        read_sequencefile_df(data_path),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
# ============================================================================
# Partitioned Read Tests
# ============================================================================
@ignore_order(local=True)
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_partitioned_read(spark_tmp_path, reader_type):
    """Test reading SequenceFiles with Hive-style partitioning."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    # One subdirectory per partition value, Hive-style 'part=<val>'.
    for part_val in ['a', 'b', 'c']:
        part_dir = data_path + f'/part={part_val}'
        rows = [f'{part_val}_record{i}'.encode() for i in range(3)]
        with_cpu_session(lambda spark, p=rows, pp=part_dir:
                         write_sequencefile_with_rdd(spark, pp, p))
    # Verify both the data columns and the inferred partition column.
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: read_sequencefile_df(data_path)(spark)
            .select("key", "value", "part"),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
@ignore_order(local=True)
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_partitioned_read_just_partitions(spark_tmp_path, reader_type):
    """Test reading only partition columns from SequenceFiles."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    # Hive-style layout; 'pkey' avoids a collision with the 'key' data column.
    for part_val in [0, 1, 2]:
        part_dir = data_path + f'/pkey={part_val}'
        rows = [f'record{i}'.encode() for i in range(2)]
        with_cpu_session(lambda spark, p=rows, pp=part_dir:
                         write_sequencefile_with_rdd(spark, pp, p))
    # Select only the partition column, dropping all data columns.
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: read_sequencefile_df(data_path)(spark).select("pkey"),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
@ignore_order(local=True)
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_nested_partitions(spark_tmp_path, reader_type):
    """Test reading SequenceFiles with nested partitioning."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    # Two-level Hive-style layout; 'pkey' avoids a collision with the 'key'
    # data column.
    for outer in [0, 1]:
        for inner in [20, 21]:
            part_dir = data_path + f'/pkey={outer}/pkey2={inner}'
            rows = [f'key{outer}_key2{inner}_rec{i}'.encode() for i in range(2)]
            with_cpu_session(lambda spark, p=rows, pp=part_dir:
                             write_sequencefile_with_rdd(spark, pp, p))
    assert_gpu_and_cpu_are_equal_collect(
        read_sequencefile_df(data_path),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
# ============================================================================
# Large Data Tests
# ============================================================================
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_large_batch(spark_tmp_path, reader_type):
    """Test reading many records to verify batch handling."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    # 1000 records so the reader has to deal with more than a trivial batch.
    rows = [f'record-{i}-payload-data'.encode() for i in range(1000)]
    with_cpu_session(lambda spark: write_sequencefile_with_rdd(spark, data_path, rows))
    assert_gpu_and_cpu_are_equal_collect(
        read_sequencefile_df(data_path),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_read_count(spark_tmp_path, reader_type):
    """Test row count operation on SequenceFiles."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    rows = [f'record-{i}'.encode() for i in range(500)]
    with_cpu_session(lambda spark: write_sequencefile_with_rdd(spark, data_path, rows))
    # Count queries may take a different path than full reads, so compare
    # row counts rather than collected data.
    assert_gpu_and_cpu_row_counts_equal(
        read_sequencefile_df(data_path),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
# ============================================================================
# Varied Record Sizes Tests
# ============================================================================
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_varied_record_sizes(spark_tmp_path, reader_type):
    """Test reading SequenceFiles with varied record sizes."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    # Sizes span from zero bytes up to roughly 13KB to stress buffer handling.
    rows = [
        b'',                           # Empty
        b'x',                          # 1 byte
        b'small',                      # Small
        b'medium-sized-record' * 10,   # Medium
        b'large-record' * 1000,        # Large (~13KB)
    ]
    with_cpu_session(lambda spark: write_sequencefile_with_rdd(spark, data_path, rows))
    assert_gpu_and_cpu_are_equal_collect(
        read_sequencefile_df(data_path),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_binary_data(spark_tmp_path, reader_type):
    """Test reading SequenceFiles with binary data (all byte values)."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    # A mix of binary patterns, covering the whole byte range.
    rows = [
        bytes(range(256)),                      # All byte values 0-255
        bytes([0] * 100),                       # All zeros
        bytes([255] * 100),                     # All ones
        bytes([0xDE, 0xAD, 0xBE, 0xEF] * 25),   # Pattern
    ]
    with_cpu_session(lambda spark: write_sequencefile_with_rdd(spark, data_path, rows))
    assert_gpu_and_cpu_are_equal_collect(
        read_sequencefile_df(data_path),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
# ============================================================================
# Filter Tests
# ============================================================================
@ignore_order(local=True)
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_filter_on_partition(spark_tmp_path, reader_type):
    """Test filtering on partition column."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    # Hive-style partitioned layout with three partition values.
    for part_val in ['a', 'b', 'c']:
        part_dir = data_path + f'/part={part_val}'
        rows = [f'{part_val}_record{i}'.encode() for i in range(5)]
        with_cpu_session(lambda spark, p=rows, pp=part_dir:
                         write_sequencefile_with_rdd(spark, pp, p))
    # A predicate on the partition column should restrict the read to one
    # partition directory.
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: read_sequencefile_df(data_path)(spark)
            .filter(f.col('part') == 'a'),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
# ============================================================================
# Input File Metadata Tests
# ============================================================================
@ignore_order(local=True)
@pytest.mark.parametrize('reader_type', sequencefile_reader_types)
def test_input_file_meta(spark_tmp_path, reader_type):
    """Test reading input file metadata."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    # Multiple files in a partitioned layout; 'pkey' avoids a collision with
    # the 'key' data column.
    for pval in [0, 1]:
        part_dir = data_path + f'/pkey={pval}'
        rows = [f'key{pval}_record{i}'.encode() for i in range(3)]
        with_cpu_session(lambda spark, p=rows, pp=part_dir:
                         write_sequencefile_with_rdd(spark, pp, p))
    # The input_file_* expressions must report per-file metadata consistently
    # between CPU and GPU.
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: read_sequencefile_df(data_path)(spark)
            .selectExpr('value',
                        'input_file_name()',
                        'input_file_block_start()',
                        'input_file_block_length()'),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': reader_type})
# ============================================================================
# Multithreaded Reader Tests
# ============================================================================
@ignore_order(local=True)
def test_multithreaded_max_files_parallel(spark_tmp_path):
    """Test multithreaded reader with limited parallel file count."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    # Ten small files so a cap of 3 parallel files forces multiple read waves.
    for file_num in range(10):
        target = data_path + f'/file{file_num}'
        rows = [f'file{file_num}_record{rec}'.encode() for rec in range(5)]
        with_cpu_session(lambda spark, p=rows, fp=target:
                         write_sequencefile_with_rdd(spark, fp, p))
    assert_gpu_and_cpu_are_equal_collect(
        read_sequencefile_df(data_path),
        conf={
            'spark.rapids.sql.format.sequencefile.reader.type': 'MULTITHREADED',
            'spark.rapids.sql.format.sequencefile.multiThreadedRead.maxNumFilesParallel': '3'
        })
# ============================================================================
# AUTO Reader Type Tests
# ============================================================================
@ignore_order(local=True)
def test_auto_reader_type(spark_tmp_path):
    """Test AUTO reader type selection."""
    data_path = spark_tmp_path + '/SEQFILE_DATA'
    # Several files so the AUTO heuristic has real input to choose a reader.
    for file_num in range(3):
        target = data_path + f'/file{file_num}'
        rows = [f'file{file_num}_record{rec}'.encode() for rec in range(5)]
        with_cpu_session(lambda spark, p=rows, fp=target:
                         write_sequencefile_with_rdd(spark, fp, p))
    # AUTO lets the plugin decide between PERFILE and MULTITHREADED.
    assert_gpu_and_cpu_are_equal_collect(
        read_sequencefile_df(data_path),
        conf={'spark.rapids.sql.format.sequencefile.reader.type': 'AUTO'})