
Commit a5139e8

clippy fixes
1 parent d8f75fc commit a5139e8
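Most of the hunks in this commit inline variables into format strings (clippy's uninlined_format_args lint); the storage_gcs.rs hunks also collapse nested `if let` blocks into let-chains, and page_index_evaluator.rs reorders a lint attribute. A minimal before/after sketch of the format-args pattern, using an illustrative local rather than anything from the diff:

    // Illustrative sketch only; `table_location` is a stand-in local, not the crate's API.
    fn format_args_example(table_location: &str) {
        // Before: positional argument, flagged by clippy::uninlined_format_args.
        let _old = format!("{}/1.parquet", &table_location);
        // After: the variable is captured directly in the format string.
        let _new = format!("{table_location}/1.parquet");
    }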

File tree

9 files changed: +64 −67 lines changed


crates/iceberg/src/arrow/reader.rs

Lines changed: 30 additions & 32 deletions
@@ -2006,7 +2006,7 @@ message schema {
 .set_compression(Compression::SNAPPY)
 .build();

-let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+let file = File::create(format!("{table_location}/1.parquet")).unwrap();
 let mut writer =
 ArrowWriter::try_new(file, to_write.schema(), Some(props.clone())).unwrap();

@@ -2187,7 +2187,7 @@ message schema {

 let tmp_dir = TempDir::new().unwrap();
 let table_location = tmp_dir.path().to_str().unwrap().to_string();
-let file_path = format!("{}/multi_row_group.parquet", &table_location);
+let file_path = format!("{table_location}/multi_row_group.parquet");

 // Force each batch into its own row group for testing byte range filtering.
 let batch1 = RecordBatch::try_new(arrow_schema.clone(), vec![Arc::new(Int32Array::from(
@@ -2385,7 +2385,7 @@ message schema {
 let props = WriterProperties::builder()
 .set_compression(Compression::SNAPPY)
 .build();
-let file = File::create(format!("{}/old_file.parquet", &table_location)).unwrap();
+let file = File::create(format!("{table_location}/old_file.parquet")).unwrap();
 let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();
 writer.write(&to_write).expect("Writing batch");
 writer.close().unwrap();
@@ -2488,7 +2488,7 @@ message schema {
 // Step 1: Create data file with 200 rows in 2 row groups
 // Row group 0: rows 0-99 (ids 1-100)
 // Row group 1: rows 100-199 (ids 101-200)
-let data_file_path = format!("{}/data.parquet", &table_location);
+let data_file_path = format!("{table_location}/data.parquet");

 let batch1 = RecordBatch::try_new(arrow_schema.clone(), vec![Arc::new(
 Int32Array::from_iter_values(1..=100),
@@ -2522,7 +2522,7 @@ message schema {
 );

 // Step 2: Create position delete file that deletes row 199 (id=200, last row in row group 1)
-let delete_file_path = format!("{}/deletes.parquet", &table_location);
+let delete_file_path = format!("{table_location}/deletes.parquet");

 let delete_schema = Arc::new(ArrowSchema::new(vec![
 Field::new("file_path", DataType::Utf8, false).with_metadata(HashMap::from([(
@@ -2584,15 +2584,14 @@ message schema {
 // Step 4: Verify we got 199 rows (not 200)
 let total_rows: usize = result.iter().map(|b| b.num_rows()).sum();

-println!("Total rows read: {}", total_rows);
+println!("Total rows read: {total_rows}");
 println!("Expected: 199 rows (deleted row 199 which had id=200)");

 // This assertion will FAIL before the fix and PASS after the fix
 assert_eq!(
 total_rows, 199,
-"Expected 199 rows after deleting row 199, but got {} rows. \
-The bug causes position deletes in later row groups to be ignored.",
-total_rows
+"Expected 199 rows after deleting row 199, but got {total_rows} rows. \
+The bug causes position deletes in later row groups to be ignored."
 );

 // Verify the deleted row (id=200) is not present
@@ -2679,7 +2678,7 @@ message schema {
 // Step 1: Create data file with 200 rows in 2 row groups
 // Row group 0: rows 0-99 (ids 1-100)
 // Row group 1: rows 100-199 (ids 101-200)
-let data_file_path = format!("{}/data.parquet", &table_location);
+let data_file_path = format!("{table_location}/data.parquet");

 let batch1 = RecordBatch::try_new(arrow_schema.clone(), vec![Arc::new(
 Int32Array::from_iter_values(1..=100),
@@ -2713,7 +2712,7 @@ message schema {
 );

 // Step 2: Create position delete file that deletes row 199 (id=200, last row in row group 1)
-let delete_file_path = format!("{}/deletes.parquet", &table_location);
+let delete_file_path = format!("{table_location}/deletes.parquet");

 let delete_schema = Arc::new(ArrowSchema::new(vec![
 Field::new("file_path", DataType::Utf8, false).with_metadata(HashMap::from([(
@@ -2800,16 +2799,15 @@ message schema {
 // Row group 1 has 100 rows (ids 101-200), minus 1 delete (id=200) = 99 rows
 let total_rows: usize = result.iter().map(|b| b.num_rows()).sum();

-println!("Total rows read from row group 1: {}", total_rows);
+println!("Total rows read from row group 1: {total_rows}");
 println!("Expected: 99 rows (row group 1 has 100 rows, 1 delete at position 199)");

 // This assertion will FAIL before the fix and PASS after the fix
 assert_eq!(
 total_rows, 99,
-"Expected 99 rows from row group 1 after deleting position 199, but got {} rows. \
+"Expected 99 rows from row group 1 after deleting position 199, but got {total_rows} rows. \
 The bug causes position deletes to be lost when advance_to() is followed by next() \
-when skipping unselected row groups.",
-total_rows
+when skipping unselected row groups."
 );

 // Verify the deleted row (id=200) is not present
@@ -2898,7 +2896,7 @@ message schema {
 // Step 1: Create data file with 200 rows in 2 row groups
 // Row group 0: rows 0-99 (ids 1-100)
 // Row group 1: rows 100-199 (ids 101-200)
-let data_file_path = format!("{}/data.parquet", &table_location);
+let data_file_path = format!("{table_location}/data.parquet");

 let batch1 = RecordBatch::try_new(arrow_schema.clone(), vec![Arc::new(
 Int32Array::from_iter_values(1..=100),
@@ -2932,7 +2930,7 @@ message schema {
 );

 // Step 2: Create position delete file that deletes row 0 (id=1, first row in row group 0)
-let delete_file_path = format!("{}/deletes.parquet", &table_location);
+let delete_file_path = format!("{table_location}/deletes.parquet");

 let delete_schema = Arc::new(ArrowSchema::new(vec![
 Field::new("file_path", DataType::Utf8, false).with_metadata(HashMap::from([(
@@ -3075,7 +3073,7 @@ message schema {
 .set_compression(Compression::SNAPPY)
 .build();

-let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+let file = File::create(format!("{table_location}/1.parquet")).unwrap();
 let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();

 writer.write(&to_write).expect("Writing batch");
@@ -3088,7 +3086,7 @@ message schema {
 start: 0,
 length: 0,
 record_count: None,
-data_file_path: format!("{}/1.parquet", table_location),
+data_file_path: format!("{table_location}/1.parquet"),
 data_file_format: DataFileFormat::Parquet,
 schema: schema.clone(),
 project_field_ids: vec![1, 2],
@@ -3169,7 +3167,7 @@ message schema {
 .set_compression(Compression::SNAPPY)
 .build();

-let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+let file = File::create(format!("{table_location}/1.parquet")).unwrap();
 let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();

 writer.write(&to_write).expect("Writing batch");
@@ -3182,7 +3180,7 @@ message schema {
 start: 0,
 length: 0,
 record_count: None,
-data_file_path: format!("{}/1.parquet", table_location),
+data_file_path: format!("{table_location}/1.parquet"),
 data_file_format: DataFileFormat::Parquet,
 schema: schema.clone(),
 project_field_ids: vec![1, 3],
@@ -3252,7 +3250,7 @@ message schema {
 .set_compression(Compression::SNAPPY)
 .build();

-let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+let file = File::create(format!("{table_location}/1.parquet")).unwrap();
 let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();

 writer.write(&to_write).expect("Writing batch");
@@ -3265,7 +3263,7 @@ message schema {
 start: 0,
 length: 0,
 record_count: None,
-data_file_path: format!("{}/1.parquet", table_location),
+data_file_path: format!("{table_location}/1.parquet"),
 data_file_format: DataFileFormat::Parquet,
 schema: schema.clone(),
 project_field_ids: vec![1, 2, 3],
@@ -3337,7 +3335,7 @@ message schema {
 .set_max_row_group_size(2)
 .build();

-let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+let file = File::create(format!("{table_location}/1.parquet")).unwrap();
 let mut writer = ArrowWriter::try_new(file, arrow_schema.clone(), Some(props)).unwrap();

 // Write 6 rows in 3 batches (will create 3 row groups)
@@ -3362,7 +3360,7 @@ message schema {
 start: 0,
 length: 0,
 record_count: None,
-data_file_path: format!("{}/1.parquet", table_location),
+data_file_path: format!("{table_location}/1.parquet"),
 data_file_format: DataFileFormat::Parquet,
 schema: schema.clone(),
 project_field_ids: vec![1, 2],
@@ -3400,7 +3398,7 @@ message schema {
 assert_eq!(all_values.len(), 6);

 for i in 0..6 {
-assert_eq!(all_names[i], format!("name_{}", i));
+assert_eq!(all_names[i], format!("name_{i}"));
 assert_eq!(all_values[i], i as i32);
 }
 }
@@ -3475,7 +3473,7 @@ message schema {
 .set_compression(Compression::SNAPPY)
 .build();

-let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+let file = File::create(format!("{table_location}/1.parquet")).unwrap();
 let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();

 writer.write(&to_write).expect("Writing batch");
@@ -3488,7 +3486,7 @@ message schema {
 start: 0,
 length: 0,
 record_count: None,
-data_file_path: format!("{}/1.parquet", table_location),
+data_file_path: format!("{table_location}/1.parquet"),
 data_file_format: DataFileFormat::Parquet,
 schema: schema.clone(),
 project_field_ids: vec![1, 2],
@@ -3569,7 +3567,7 @@ message schema {
 .set_compression(Compression::SNAPPY)
 .build();

-let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+let file = File::create(format!("{table_location}/1.parquet")).unwrap();
 let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();
 writer.write(&to_write).expect("Writing batch");
 writer.close().unwrap();
@@ -3581,7 +3579,7 @@ message schema {
 start: 0,
 length: 0,
 record_count: None,
-data_file_path: format!("{}/1.parquet", table_location),
+data_file_path: format!("{table_location}/1.parquet"),
 data_file_format: DataFileFormat::Parquet,
 schema: schema.clone(),
 project_field_ids: vec![1, 5, 2],
@@ -3668,7 +3666,7 @@ message schema {
 .set_compression(Compression::SNAPPY)
 .build();

-let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+let file = File::create(format!("{table_location}/1.parquet")).unwrap();
 let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();
 writer.write(&to_write).expect("Writing batch");
 writer.close().unwrap();
@@ -3687,7 +3685,7 @@ message schema {
 start: 0,
 length: 0,
 record_count: None,
-data_file_path: format!("{}/1.parquet", table_location),
+data_file_path: format!("{table_location}/1.parquet"),
 data_file_format: DataFileFormat::Parquet,
 schema: schema.clone(),
 project_field_ids: vec![1, 2, 3],

crates/iceberg/src/expr/visitors/page_index_evaluator.rs

Lines changed: 2 additions & 1 deletion
@@ -792,9 +792,10 @@ impl BoundPredicateVisitor for PageIndexEvaluator<'_> {
 // 2. Contribute test utilities to arrow-rs parquet crate
 // 3. Use parquet's internal test module approach (requires being in same crate)

-#[cfg(all(test, feature = "page_index_tests_disabled"))]
 #[allow(unexpected_cfgs)]
+#[cfg(all(test, feature = "page_index_tests_disabled"))]
 mod tests {
+#![allow(unexpected_cfgs)]
 use std::collections::HashMap;
 use std::sync::Arc;

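The hunk above moves `#[allow(unexpected_cfgs)]` ahead of the `#[cfg(...)]` gate and adds an inner `#![allow(unexpected_cfgs)]` to the module. A minimal sketch of the resulting shape, with a placeholder feature name, assuming the intent is to silence the unexpected_cfgs warning for a deliberately undeclared feature:

    // Sketch only: `some_disabled_feature` is a placeholder, not the crate's feature name.
    #[allow(unexpected_cfgs)]
    #[cfg(all(test, feature = "some_disabled_feature"))]
    mod disabled_tests {
        #![allow(unexpected_cfgs)] // inner allow so cfgs evaluated inside the module are covered too

        #[test]
        fn placeholder() {}
    }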
crates/iceberg/src/io/storage.rs

Lines changed: 2 additions & 2 deletions
@@ -175,7 +175,7 @@ impl Storage {
 } else {
 Err(Error::new(
 ErrorKind::DataInvalid,
-format!("Invalid gcs url: {}, should start with {}", path, prefix),
+format!("Invalid gcs url: {path}, should start with {prefix}"),
 ))
 }
 }
@@ -190,7 +190,7 @@ impl Storage {
 } else {
 Err(Error::new(
 ErrorKind::DataInvalid,
-format!("Invalid oss url: {}, should start with {}", path, prefix),
+format!("Invalid oss url: {path}, should start with {prefix}"),
 ))
 }
 }

crates/iceberg/src/io/storage_azdls.rs

Lines changed: 10 additions & 10 deletions
@@ -165,7 +165,7 @@ impl FromStr for AzureStorageScheme {
 "wasbs" => Ok(AzureStorageScheme::Wasbs),
 _ => Err(Error::new(
 ErrorKind::DataInvalid,
-format!("Unexpected Azure Storage scheme: {}", s),
+format!("Unexpected Azure Storage scheme: {s}"),
 )),
 }
 }
@@ -397,11 +397,11 @@ mod tests {
 let config = azdls_config_parse(properties);
 match expected {
 Some(expected_config) => {
-assert!(config.is_ok(), "Test case {} failed: {:?}", name, config);
-assert_eq!(config.unwrap(), expected_config, "Test case: {}", name);
+assert!(config.is_ok(), "Test case {name} failed: {config:?}");
+assert_eq!(config.unwrap(), expected_config, "Test case: {name}");
 }
 None => {
-assert!(config.is_err(), "Test case {} expected error.", name);
+assert!(config.is_err(), "Test case {name} expected error.");
 }
 }
 }
@@ -495,14 +495,14 @@ mod tests {
 let result = azdls_create_operator(input.0, &input.1, &input.2);
 match expected {
 Some((expected_filesystem, expected_path)) => {
-assert!(result.is_ok(), "Test case {} failed: {:?}", name, result);
+assert!(result.is_ok(), "Test case {name} failed: {result:?}");

 let (op, relative_path) = result.unwrap();
 assert_eq!(op.info().name(), expected_filesystem);
 assert_eq!(relative_path, expected_path);
 }
 None => {
-assert!(result.is_err(), "Test case {} expected error.", name);
+assert!(result.is_err(), "Test case {name} expected error.");
 }
 }
 }
@@ -543,11 +543,11 @@ mod tests {
 let result = input.parse::<AzureStoragePath>();
 match expected {
 Some(expected_path) => {
-assert!(result.is_ok(), "Test case {} failed: {:?}", name, result);
-assert_eq!(result.unwrap(), expected_path, "Test case: {}", name);
+assert!(result.is_ok(), "Test case {name} failed: {result:?}");
+assert_eq!(result.unwrap(), expected_path, "Test case: {name}");
 }
 None => {
-assert!(result.is_err(), "Test case {} expected error.", name);
+assert!(result.is_err(), "Test case {name} expected error.");
 }
 }
 }
@@ -593,7 +593,7 @@ mod tests {

 for (name, path, expected) in test_cases {
 let endpoint = path.as_endpoint();
-assert_eq!(endpoint, expected, "Test case: {}", name);
+assert_eq!(endpoint, expected, "Test case: {name}");
 }
 }
 }

crates/iceberg/src/io/storage_gcs.rs

Lines changed: 13 additions & 13 deletions
@@ -71,20 +71,20 @@ pub(crate) fn gcs_config_parse(mut m: HashMap<String, String>) -> Result<GcsConf
 cfg.disable_config_load = true;
 }

-if let Some(allow_anonymous) = m.remove(GCS_ALLOW_ANONYMOUS) {
-if is_truthy(allow_anonymous.to_lowercase().as_str()) {
-cfg.allow_anonymous = true;
-}
+if let Some(allow_anonymous) = m.remove(GCS_ALLOW_ANONYMOUS)
+&& is_truthy(allow_anonymous.to_lowercase().as_str())
+{
+cfg.allow_anonymous = true;
 }
-if let Some(disable_ec2_metadata) = m.remove(GCS_DISABLE_VM_METADATA) {
-if is_truthy(disable_ec2_metadata.to_lowercase().as_str()) {
-cfg.disable_vm_metadata = true;
-}
+if let Some(disable_ec2_metadata) = m.remove(GCS_DISABLE_VM_METADATA)
+&& is_truthy(disable_ec2_metadata.to_lowercase().as_str())
+{
+cfg.disable_vm_metadata = true;
 };
-if let Some(disable_config_load) = m.remove(GCS_DISABLE_CONFIG_LOAD) {
-if is_truthy(disable_config_load.to_lowercase().as_str()) {
-cfg.disable_config_load = true;
-}
+if let Some(disable_config_load) = m.remove(GCS_DISABLE_CONFIG_LOAD)
+&& is_truthy(disable_config_load.to_lowercase().as_str())
+{
+cfg.disable_config_load = true;
 };

 Ok(cfg)
@@ -96,7 +96,7 @@ pub(crate) fn gcs_config_build(cfg: &GcsConfig, path: &str) -> Result<Operator>
 let bucket = url.host_str().ok_or_else(|| {
 Error::new(
 ErrorKind::DataInvalid,
-format!("Invalid gcs url: {}, bucket is required", path),
+format!("Invalid gcs url: {path}, bucket is required"),
 )
 })?;

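The first storage_gcs.rs hunk is the collapsible `if let` cleanup: each nested `if let` plus inner `if` becomes a single let-chain condition. A minimal sketch of the pattern, assuming a toolchain with let-chain support and using an inline stand-in for the crate's is_truthy helper:

    use std::collections::HashMap;

    // Stand-in for the crate's is_truthy helper; the accepted values here are an assumption.
    fn is_truthy(v: &str) -> bool {
        matches!(v, "true" | "t" | "1" | "on")
    }

    fn set_bool_flag(m: &mut HashMap<String, String>, key: &str, flag: &mut bool) {
        // Nested `if let { if … { … } }` collapsed into one let-chain, as in the diff above.
        if let Some(value) = m.remove(key)
            && is_truthy(value.to_lowercase().as_str())
        {
            *flag = true;
        }
    }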