@@ -2006,7 +2006,7 @@ message schema {
             .set_compression(Compression::SNAPPY)
             .build();

-        let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+        let file = File::create(format!("{table_location}/1.parquet")).unwrap();
         let mut writer =
             ArrowWriter::try_new(file, to_write.schema(), Some(props.clone())).unwrap();

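Every hunk in this diff is the same mechanical cleanup: Rust 2021's captured identifiers let format! (and println!, assert_eq! messages, and friends) name an in-scope variable directly inside the braces, so the positional "{}" plus a trailing argument collapses into "{table_location}". Dropping the "&" changes nothing semantically, since the formatting macros take their arguments by reference anyway. A minimal sketch of the equivalence:

    let table_location = String::from("/tmp/table");
    // Old style: positional placeholder plus an explicit (and redundant) borrow.
    let old = format!("{}/1.parquet", &table_location);
    // Rust 2021 style: the identifier is captured from the enclosing scope.
    // Note this only works for plain identifiers, not arbitrary expressions.
    let new = format!("{table_location}/1.parquet");
    assert_eq!(old, new);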
@@ -2187,7 +2187,7 @@ message schema {

         let tmp_dir = TempDir::new().unwrap();
         let table_location = tmp_dir.path().to_str().unwrap().to_string();
-        let file_path = format!("{}/multi_row_group.parquet", &table_location);
+        let file_path = format!("{table_location}/multi_row_group.parquet");

         // Force each batch into its own row group for testing byte range filtering.
         let batch1 = RecordBatch::try_new(arrow_schema.clone(), vec![Arc::new(Int32Array::from(
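The byte-range-filtering test above needs each batch to land in its own row group. One way to get that layout with the parquet crate (a sketch under the assumption that the writer is an ordinary ArrowWriter; the actual writer setup sits outside this hunk) is to call flush() between writes, since flush() closes the in-progress row group:

    use std::{fs::File, ops::RangeInclusive, sync::Arc};
    use arrow_array::{ArrayRef, Int32Array, RecordBatch};
    use arrow_schema::{DataType, Field, Schema};
    use parquet::arrow::ArrowWriter;

    fn write_two_row_groups(path: &str) -> parquet::errors::Result<()> {
        let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
        let col = |r: RangeInclusive<i32>| Arc::new(Int32Array::from_iter_values(r)) as ArrayRef;
        let batch1 = RecordBatch::try_new(schema.clone(), vec![col(1..=100)]).unwrap();
        let batch2 = RecordBatch::try_new(schema.clone(), vec![col(101..=200)]).unwrap();
        let mut writer = ArrowWriter::try_new(File::create(path).unwrap(), schema, None)?;
        writer.write(&batch1)?;
        writer.flush()?; // close row group 0 so batch2 starts row group 1
        writer.write(&batch2)?;
        writer.close()?;
        Ok(())
    }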
@@ -2385,7 +2385,7 @@ message schema {
         let props = WriterProperties::builder()
             .set_compression(Compression::SNAPPY)
             .build();
-        let file = File::create(format!("{}/old_file.parquet", &table_location)).unwrap();
+        let file = File::create(format!("{table_location}/old_file.parquet")).unwrap();
         let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();
         writer.write(&to_write).expect("Writing batch");
         writer.close().unwrap();
@@ -2488,7 +2488,7 @@ message schema {
         // Step 1: Create data file with 200 rows in 2 row groups
         // Row group 0: rows 0-99 (ids 1-100)
         // Row group 1: rows 100-199 (ids 101-200)
-        let data_file_path = format!("{}/data.parquet", &table_location);
+        let data_file_path = format!("{table_location}/data.parquet");

         let batch1 = RecordBatch::try_new(arrow_schema.clone(), vec![Arc::new(
             Int32Array::from_iter_values(1..=100),
@@ -2522,7 +2522,7 @@ message schema {
         );

         // Step 2: Create position delete file that deletes row 199 (id=200, last row in row group 1)
-        let delete_file_path = format!("{}/deletes.parquet", &table_location);
+        let delete_file_path = format!("{table_location}/deletes.parquet");

         let delete_schema = Arc::new(ArrowSchema::new(vec![
             Field::new("file_path", DataType::Utf8, false).with_metadata(HashMap::from([(
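The truncated with_metadata call here presumably attaches the Parquet field id. Position delete files have a fixed schema in the Iceberg spec: file_path (string, reserved field id 2147483546) and pos (long, reserved field id 2147483545). A sketch of that schema built with arrow-schema, assuming the metadata key is the parquet crate's "PARQUET:field_id":

    use std::collections::HashMap;
    use arrow_schema::{DataType, Field, Schema};

    // Reserved column ids for position deletes, per the Iceberg spec.
    let file_path = Field::new("file_path", DataType::Utf8, false).with_metadata(
        HashMap::from([("PARQUET:field_id".to_string(), "2147483546".to_string())]),
    );
    let pos = Field::new("pos", DataType::Int64, false).with_metadata(
        HashMap::from([("PARQUET:field_id".to_string(), "2147483545".to_string())]),
    );
    let delete_schema = Schema::new(vec![file_path, pos]);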
@@ -2584,15 +2584,14 @@ message schema {
         // Step 4: Verify we got 199 rows (not 200)
         let total_rows: usize = result.iter().map(|b| b.num_rows()).sum();

-        println!("Total rows read: {}", total_rows);
+        println!("Total rows read: {total_rows}");
         println!("Expected: 199 rows (deleted row 199 which had id=200)");

         // This assertion will FAIL before the fix and PASS after the fix
         assert_eq!(
             total_rows, 199,
-            "Expected 199 rows after deleting row 199, but got {} rows. \
-             The bug causes position deletes in later row groups to be ignored.",
-            total_rows
+            "Expected 199 rows after deleting row 199, but got {total_rows} rows. \
+             The bug causes position deletes in later row groups to be ignored."
         );

         // Verify the deleted row (id=200) is not present
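A pos value in a delete file is the row's ordinal within the whole data file, not within a row group, so a reader that evaluates deletes per row group has to offset each group's rows by the row counts of all preceding groups. A hypothetical helper (not the crate's API) showing the translation this test depends on:

    // Map a file-wide delete position to (row_group_index, local_row_index).
    fn locate(pos: u64, row_group_lens: &[u64]) -> Option<(usize, u64)> {
        let mut start = 0u64;
        for (rg, &len) in row_group_lens.iter().enumerate() {
            if pos < start + len {
                return Some((rg, pos - start));
            }
            start += len;
        }
        None
    }

    // With two 100-row groups, delete position 199 is the last row of group 1.
    assert_eq!(locate(199, &[100, 100]), Some((1, 99)));

One plausible way later-row-group deletes get ignored is dropping that offset: inside row group 1 only local rows 0-99 exist, so a file-wide position of 199 never matches, which is exactly the 200-vs-199 failure the assertion above guards against.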
@@ -2679,7 +2678,7 @@ message schema {
         // Step 1: Create data file with 200 rows in 2 row groups
         // Row group 0: rows 0-99 (ids 1-100)
         // Row group 1: rows 100-199 (ids 101-200)
-        let data_file_path = format!("{}/data.parquet", &table_location);
+        let data_file_path = format!("{table_location}/data.parquet");

         let batch1 = RecordBatch::try_new(arrow_schema.clone(), vec![Arc::new(
             Int32Array::from_iter_values(1..=100),
@@ -2713,7 +2712,7 @@ message schema {
         );

         // Step 2: Create position delete file that deletes row 199 (id=200, last row in row group 1)
-        let delete_file_path = format!("{}/deletes.parquet", &table_location);
+        let delete_file_path = format!("{table_location}/deletes.parquet");

         let delete_schema = Arc::new(ArrowSchema::new(vec![
             Field::new("file_path", DataType::Utf8, false).with_metadata(HashMap::from([(
@@ -2800,16 +2799,15 @@ message schema {
         // Row group 1 has 100 rows (ids 101-200), minus 1 delete (id=200) = 99 rows
         let total_rows: usize = result.iter().map(|b| b.num_rows()).sum();

-        println!("Total rows read from row group 1: {}", total_rows);
+        println!("Total rows read from row group 1: {total_rows}");
         println!("Expected: 99 rows (row group 1 has 100 rows, 1 delete at position 199)");

         // This assertion will FAIL before the fix and PASS after the fix
         assert_eq!(
             total_rows, 99,
-            "Expected 99 rows from row group 1 after deleting position 199, but got {} rows. \
+            "Expected 99 rows from row group 1 after deleting position 199, but got {total_rows} rows. \
             The bug causes position deletes to be lost when advance_to() is followed by next() \
-             when skipping unselected row groups.",
-            total_rows
+             when skipping unselected row groups."
         );

         // Verify the deleted row (id=200) is not present
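The assertion message names the suspected mechanism: a cursor over sorted delete positions is advanced to the start of the selected row group, and a following next() call then consumes, rather than peeks, the first relevant delete. An illustrative sketch with made-up types (not iceberg-rust's actual internals):

    // Illustrative cursor over sorted, file-wide delete positions.
    struct DeleteCursor {
        positions: Vec<u64>,
        idx: usize,
    }

    impl DeleteCursor {
        // Skip deletes that fall before `pos` (e.g. in skipped row groups).
        fn advance_to(&mut self, pos: u64) {
            while self.idx < self.positions.len() && self.positions[self.idx] < pos {
                self.idx += 1;
            }
        }
        fn next(&mut self) -> Option<u64> {
            let v = self.positions.get(self.idx).copied();
            self.idx += 1;
            v
        }
    }

    let mut cursor = DeleteCursor { positions: vec![199], idx: 0 };
    cursor.advance_to(100); // skip row group 0 (rows 0..100); 199 survives
    let _ = cursor.next();  // buggy pairing: this consumes 199 outright
    assert_eq!(cursor.next(), None); // the delete is lost, row 199 gets returned

Pairing advance_to() with an unconditional next() is only safe if that next() result is itself used as the lookup; calling next() again for the "real" read drops one delete, matching the 100-vs-99 failure this test pins down.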
@@ -2898,7 +2896,7 @@ message schema {
         // Step 1: Create data file with 200 rows in 2 row groups
         // Row group 0: rows 0-99 (ids 1-100)
         // Row group 1: rows 100-199 (ids 101-200)
-        let data_file_path = format!("{}/data.parquet", &table_location);
+        let data_file_path = format!("{table_location}/data.parquet");

         let batch1 = RecordBatch::try_new(arrow_schema.clone(), vec![Arc::new(
             Int32Array::from_iter_values(1..=100),
@@ -2932,7 +2930,7 @@ message schema {
         );

         // Step 2: Create position delete file that deletes row 0 (id=1, first row in row group 0)
-        let delete_file_path = format!("{}/deletes.parquet", &table_location);
+        let delete_file_path = format!("{table_location}/deletes.parquet");

         let delete_schema = Arc::new(ArrowSchema::new(vec![
             Field::new("file_path", DataType::Utf8, false).with_metadata(HashMap::from([(
@@ -3075,7 +3073,7 @@ message schema {
             .set_compression(Compression::SNAPPY)
             .build();

-        let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+        let file = File::create(format!("{table_location}/1.parquet")).unwrap();
         let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();

         writer.write(&to_write).expect("Writing batch");
@@ -3088,7 +3086,7 @@ message schema {
             start: 0,
             length: 0,
             record_count: None,
-            data_file_path: format!("{}/1.parquet", table_location),
+            data_file_path: format!("{table_location}/1.parquet"),
             data_file_format: DataFileFormat::Parquet,
             schema: schema.clone(),
             project_field_ids: vec![1, 2],
@@ -3169,7 +3167,7 @@ message schema {
             .set_compression(Compression::SNAPPY)
             .build();

-        let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+        let file = File::create(format!("{table_location}/1.parquet")).unwrap();
         let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();

         writer.write(&to_write).expect("Writing batch");
@@ -3182,7 +3180,7 @@ message schema {
             start: 0,
             length: 0,
             record_count: None,
-            data_file_path: format!("{}/1.parquet", table_location),
+            data_file_path: format!("{table_location}/1.parquet"),
             data_file_format: DataFileFormat::Parquet,
             schema: schema.clone(),
             project_field_ids: vec![1, 3],
@@ -3252,7 +3250,7 @@ message schema {
             .set_compression(Compression::SNAPPY)
             .build();

-        let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+        let file = File::create(format!("{table_location}/1.parquet")).unwrap();
         let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();

         writer.write(&to_write).expect("Writing batch");
@@ -3265,7 +3263,7 @@ message schema {
             start: 0,
             length: 0,
             record_count: None,
-            data_file_path: format!("{}/1.parquet", table_location),
+            data_file_path: format!("{table_location}/1.parquet"),
             data_file_format: DataFileFormat::Parquet,
             schema: schema.clone(),
             project_field_ids: vec![1, 2, 3],
@@ -3337,7 +3335,7 @@ message schema {
             .set_max_row_group_size(2)
             .build();

-        let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+        let file = File::create(format!("{table_location}/1.parquet")).unwrap();
         let mut writer = ArrowWriter::try_new(file, arrow_schema.clone(), Some(props)).unwrap();

         // Write 6 rows in 3 batches (will create 3 row groups)
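In the hunk above, set_max_row_group_size(2) caps every row group at two rows, so six rows arrive as three row groups regardless of batch boundaries. The layout can be confirmed by reading the footer back; a short sketch (the path is hypothetical):

    use std::fs::File;
    use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;

    let file = File::open("/tmp/table/1.parquet").unwrap();
    let builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
    assert_eq!(builder.metadata().num_row_groups(), 3);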
@@ -3362,7 +3360,7 @@ message schema {
             start: 0,
             length: 0,
             record_count: None,
-            data_file_path: format!("{}/1.parquet", table_location),
+            data_file_path: format!("{table_location}/1.parquet"),
             data_file_format: DataFileFormat::Parquet,
             schema: schema.clone(),
             project_field_ids: vec![1, 2],
@@ -3400,7 +3398,7 @@ message schema {
         assert_eq!(all_values.len(), 6);

         for i in 0..6 {
-            assert_eq!(all_names[i], format!("name_{}", i));
+            assert_eq!(all_names[i], format!("name_{i}"));
             assert_eq!(all_values[i], i as i32);
         }
     }
@@ -3475,7 +3473,7 @@ message schema {
             .set_compression(Compression::SNAPPY)
             .build();

-        let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+        let file = File::create(format!("{table_location}/1.parquet")).unwrap();
         let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();

         writer.write(&to_write).expect("Writing batch");
@@ -3488,7 +3486,7 @@ message schema {
             start: 0,
             length: 0,
             record_count: None,
-            data_file_path: format!("{}/1.parquet", table_location),
+            data_file_path: format!("{table_location}/1.parquet"),
             data_file_format: DataFileFormat::Parquet,
             schema: schema.clone(),
             project_field_ids: vec![1, 2],
@@ -3569,7 +3567,7 @@ message schema {
             .set_compression(Compression::SNAPPY)
             .build();

-        let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+        let file = File::create(format!("{table_location}/1.parquet")).unwrap();
         let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();
         writer.write(&to_write).expect("Writing batch");
         writer.close().unwrap();
@@ -3581,7 +3579,7 @@ message schema {
             start: 0,
             length: 0,
             record_count: None,
-            data_file_path: format!("{}/1.parquet", table_location),
+            data_file_path: format!("{table_location}/1.parquet"),
             data_file_format: DataFileFormat::Parquet,
             schema: schema.clone(),
             project_field_ids: vec![1, 5, 2],
@@ -3668,7 +3666,7 @@ message schema {
             .set_compression(Compression::SNAPPY)
             .build();

-        let file = File::create(format!("{}/1.parquet", &table_location)).unwrap();
+        let file = File::create(format!("{table_location}/1.parquet")).unwrap();
         let mut writer = ArrowWriter::try_new(file, to_write.schema(), Some(props)).unwrap();
         writer.write(&to_write).expect("Writing batch");
         writer.close().unwrap();
@@ -3687,7 +3685,7 @@ message schema {
             start: 0,
             length: 0,
             record_count: None,
-            data_file_path: format!("{}/1.parquet", table_location),
+            data_file_path: format!("{table_location}/1.parquet"),
             data_file_format: DataFileFormat::Parquet,
             schema: schema.clone(),
             project_field_ids: vec![1, 2, 3],