@@ -504,10 +504,10 @@ impl ArrowReader {
504504 // we need to call next() to update the cache with the newly positioned value.
505505 delete_vector_iter. advance_to ( next_row_group_base_idx) ;
506506 // Only update the cache if the cached value is stale (in the skipped range)
507- if let Some ( cached_idx) = next_deleted_row_idx_opt {
508- if cached_idx < next_row_group_base_idx {
509- next_deleted_row_idx_opt = delete_vector_iter . next ( ) ;
510- }
507+ if let Some ( cached_idx) = next_deleted_row_idx_opt
508+ && cached_idx < next_row_group_base_idx
509+ {
510+ next_deleted_row_idx_opt = delete_vector_iter . next ( ) ;
511511 }
512512
513513 // still increment the current page base index but then skip to the next row group
@@ -861,10 +861,10 @@ impl ArrowReader {
861861 } ;
862862
863863 // If all row groups were filtered out, return an empty RowSelection (select no rows)
864- if let Some ( selected_row_groups) = selected_row_groups {
865- if selected_row_groups. is_empty ( ) {
866- return Ok ( RowSelection :: from ( Vec :: new ( ) ) ) ;
867- }
864+ if let Some ( selected_row_groups) = selected_row_groups
865+ && selected_row_groups. is_empty ( )
866+ {
867+ return Ok ( RowSelection :: from ( Vec :: new ( ) ) ) ;
868868 }
869869
870870 let mut selected_row_groups_idx = 0 ;
@@ -897,10 +897,10 @@ impl ArrowReader {
897897
898898 results. push ( selections_for_page) ;
899899
900- if let Some ( selected_row_groups) = selected_row_groups {
901- if selected_row_groups_idx == selected_row_groups. len ( ) {
902- break ;
903- }
900+ if let Some ( selected_row_groups) = selected_row_groups
901+ && selected_row_groups_idx == selected_row_groups. len ( )
902+ {
903+ break ;
904904 }
905905 }
906906
@@ -1031,13 +1031,13 @@ fn apply_name_mapping_to_arrow_schema(
10311031
10321032 let mut metadata = field. metadata ( ) . clone ( ) ;
10331033
1034- if let Some ( mapped_field) = mapped_field_opt {
1035- if let Some ( field_id) = mapped_field. field_id ( ) {
1036- // Field found in mapping with a field_id → assign it
1037- metadata. insert ( PARQUET_FIELD_ID_META_KEY . to_string ( ) , field_id. to_string ( ) ) ;
1038- }
1039- // If field_id is None, leave the field without an ID (will be filtered by projection)
1034+ if let Some ( mapped_field) = mapped_field_opt
1035+ && let Some ( field_id) = mapped_field. field_id ( )
1036+ {
1037+ // Field found in mapping with a field_id → assign it
1038+ metadata. insert ( PARQUET_FIELD_ID_META_KEY . to_string ( ) , field_id. to_string ( ) ) ;
10401039 }
1040+ // If field_id is None, leave the field without an ID (will be filtered by projection)
10411041 // If field not found in mapping, leave it without an ID (will be filtered by projection)
10421042
10431043 Field :: new ( field. name ( ) , field. data_type ( ) . clone ( ) , field. is_nullable ( ) )
@@ -2731,15 +2731,14 @@ message schema {
27312731 // Step 4: Verify we got 199 rows (not 200)
27322732 let total_rows: usize = result. iter ( ) . map ( |b| b. num_rows ( ) ) . sum ( ) ;
27332733
2734- println ! ( "Total rows read: {}" , total_rows ) ;
2734+ println ! ( "Total rows read: {total_rows}" ) ;
27352735 println ! ( "Expected: 199 rows (deleted row 199 which had id=200)" ) ;
27362736
27372737 // This assertion will FAIL before the fix and PASS after the fix
27382738 assert_eq ! (
27392739 total_rows, 199 ,
2740- "Expected 199 rows after deleting row 199, but got {} rows. \
2741- The bug causes position deletes in later row groups to be ignored.",
2742- total_rows
2740+ "Expected 199 rows after deleting row 199, but got {total_rows} rows. \
2741+ The bug causes position deletes in later row groups to be ignored."
27432742 ) ;
27442743
27452744 // Verify the deleted row (id=200) is not present
@@ -2950,16 +2949,15 @@ message schema {
29502949 // Row group 1 has 100 rows (ids 101-200), minus 1 delete (id=200) = 99 rows
29512950 let total_rows: usize = result. iter ( ) . map ( |b| b. num_rows ( ) ) . sum ( ) ;
29522951
2953- println ! ( "Total rows read from row group 1: {}" , total_rows ) ;
2952+ println ! ( "Total rows read from row group 1: {total_rows}" ) ;
29542953 println ! ( "Expected: 99 rows (row group 1 has 100 rows, 1 delete at position 199)" ) ;
29552954
29562955 // This assertion will FAIL before the fix and PASS after the fix
29572956 assert_eq ! (
29582957 total_rows, 99 ,
2959- "Expected 99 rows from row group 1 after deleting position 199, but got {} rows. \
2958+ "Expected 99 rows from row group 1 after deleting position 199, but got {total_rows} rows. \
29602959 The bug causes position deletes to be lost when advance_to() is followed by next() \
2961- when skipping unselected row groups.",
2962- total_rows
2960+ when skipping unselected row groups."
29632961 ) ;
29642962
29652963 // Verify the deleted row (id=200) is not present
@@ -3241,7 +3239,7 @@ message schema {
32413239 start: 0 ,
32423240 length: 0 ,
32433241 record_count: None ,
3244- data_file_path: format!( "{}/1.parquet" , table_location ) ,
3242+ data_file_path: format!( "{table_location}/1.parquet" ) ,
32453243 data_file_format: DataFileFormat :: Parquet ,
32463244 schema: schema. clone( ) ,
32473245 project_field_ids: vec![ 1 , 2 ] ,
@@ -3338,7 +3336,7 @@ message schema {
33383336 start: 0 ,
33393337 length: 0 ,
33403338 record_count: None ,
3341- data_file_path: format!( "{}/1.parquet" , table_location ) ,
3339+ data_file_path: format!( "{table_location}/1.parquet" ) ,
33423340 data_file_format: DataFileFormat :: Parquet ,
33433341 schema: schema. clone( ) ,
33443342 project_field_ids: vec![ 1 , 3 ] ,
@@ -3424,7 +3422,7 @@ message schema {
34243422 start: 0 ,
34253423 length: 0 ,
34263424 record_count: None ,
3427- data_file_path: format!( "{}/1.parquet" , table_location ) ,
3425+ data_file_path: format!( "{table_location}/1.parquet" ) ,
34283426 data_file_format: DataFileFormat :: Parquet ,
34293427 schema: schema. clone( ) ,
34303428 project_field_ids: vec![ 1 , 2 , 3 ] ,
@@ -3524,7 +3522,7 @@ message schema {
35243522 start: 0 ,
35253523 length: 0 ,
35263524 record_count: None ,
3527- data_file_path: format!( "{}/1.parquet" , table_location ) ,
3525+ data_file_path: format!( "{table_location}/1.parquet" ) ,
35283526 data_file_format: DataFileFormat :: Parquet ,
35293527 schema: schema. clone( ) ,
35303528 project_field_ids: vec![ 1 , 2 ] ,
@@ -3565,7 +3563,7 @@ message schema {
35653563 assert_eq ! ( all_values. len( ) , 6 ) ;
35663564
35673565 for i in 0 ..6 {
3568- assert_eq ! ( all_names[ i] , format!( "name_{}" , i ) ) ;
3566+ assert_eq ! ( all_names[ i] , format!( "name_{i}" ) ) ;
35693567 assert_eq ! ( all_values[ i] , i as i32 ) ;
35703568 }
35713569 }
@@ -3653,7 +3651,7 @@ message schema {
36533651 start: 0 ,
36543652 length: 0 ,
36553653 record_count: None ,
3656- data_file_path: format!( "{}/1.parquet" , table_location ) ,
3654+ data_file_path: format!( "{table_location}/1.parquet" ) ,
36573655 data_file_format: DataFileFormat :: Parquet ,
36583656 schema: schema. clone( ) ,
36593657 project_field_ids: vec![ 1 , 2 ] ,
@@ -3749,7 +3747,7 @@ message schema {
37493747 start: 0 ,
37503748 length: 0 ,
37513749 record_count: None ,
3752- data_file_path: format!( "{}/1.parquet" , table_location ) ,
3750+ data_file_path: format!( "{table_location}/1.parquet" ) ,
37533751 data_file_format: DataFileFormat :: Parquet ,
37543752 schema: schema. clone( ) ,
37553753 project_field_ids: vec![ 1 , 5 , 2 ] ,
@@ -3858,7 +3856,7 @@ message schema {
38583856 start: 0 ,
38593857 length: 0 ,
38603858 record_count: None ,
3861- data_file_path: format!( "{}/1.parquet" , table_location ) ,
3859+ data_file_path: format!( "{table_location}/1.parquet" ) ,
38623860 data_file_format: DataFileFormat :: Parquet ,
38633861 schema: schema. clone( ) ,
38643862 project_field_ids: vec![ 1 , 2 , 3 ] ,
@@ -3997,7 +3995,7 @@ message schema {
39973995 start: 0 ,
39983996 length: 0 ,
39993997 record_count: None ,
4000- data_file_path: format!( "{}/data.parquet" , table_location ) ,
3998+ data_file_path: format!( "{table_location}/data.parquet" ) ,
40013999 data_file_format: DataFileFormat :: Parquet ,
40024000 schema: schema. clone( ) ,
40034001 project_field_ids: vec![ 1 , 2 ] ,
0 commit comments