@@ -20,7 +20,7 @@ use std::fmt::{Debug, Formatter};
 use std::str::FromStr;
 use std::sync::Arc;
 
-use datafusion::arrow::array::{ArrayRef, RecordBatch, StringArray, UInt64Array};
+use datafusion::arrow::array::{ArrayRef, RecordBatch, StringArray};
 use datafusion::arrow::datatypes::{
     DataType, Field, Schema as ArrowSchema, SchemaRef as ArrowSchemaRef,
 };
@@ -45,7 +45,7 @@ use iceberg::writer::file_writer::ParquetWriterBuilder;
 use iceberg::writer::file_writer::location_generator::{
     DefaultFileNameGenerator, DefaultLocationGenerator,
 };
-use iceberg::writer::{CurrentFileStatus, IcebergWriter, IcebergWriterBuilder};
+use iceberg::writer::{IcebergWriter, IcebergWriterBuilder};
 use iceberg::{Error, ErrorKind};
 use parquet::file::properties::WriterProperties;
 use uuid::Uuid;
@@ -83,26 +83,22 @@ impl IcebergWriteExec {
         )
     }
 
-    // Create a record batch with count and serialized data files
-    fn make_result_batch(count: u64, data_files: Vec<String>) -> DFResult<RecordBatch> {
-        let count_array = Arc::new(UInt64Array::from(vec![count])) as ArrayRef;
+    // Create a record batch with serialized data files
+    fn make_result_batch(data_files: Vec<String>) -> DFResult<RecordBatch> {
         let files_array = Arc::new(StringArray::from(data_files)) as ArrayRef;
 
-        RecordBatch::try_from_iter_with_nullable(vec![
-            ("count", count_array, false),
-            ("data_files", files_array, false),
-        ])
-        .map_err(|e| {
-            DataFusionError::ArrowError(e, Some("Failed to make result batch".to_string()))
-        })
+        RecordBatch::try_from_iter_with_nullable(vec![("data_files", files_array, false)]).map_err(
+            |e| DataFusionError::ArrowError(e, Some("Failed to make result batch".to_string())),
+        )
     }
 
     fn make_result_schema() -> ArrowSchemaRef {
         // Define a schema.
-        Arc::new(ArrowSchema::new(vec![
-            Field::new("count", DataType::UInt64, false),
-            Field::new("data_files", DataType::Utf8, false),
-        ]))
+        Arc::new(ArrowSchema::new(vec![Field::new(
+            "data_files",
+            DataType::Utf8,
+            false,
+        )]))
     }
 }
 
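// --- Example (not part of the patch) ---
// A hedged sketch of what callers can now assume about the result schema:
// a single non-nullable Utf8 column named "data_files". The test module,
// test name, and assertions below are illustrative, not code from this commit.
#[cfg(test)]
mod result_schema_sketch {
    use super::*;
    use datafusion::arrow::datatypes::DataType;

    #[test]
    fn result_schema_is_single_data_files_column() {
        let schema = IcebergWriteExec::make_result_schema();
        assert_eq!(schema.fields().len(), 1);
        assert_eq!(schema.field(0).name(), "data_files");
        assert_eq!(schema.field(0).data_type(), &DataType::Utf8);
        assert!(!schema.field(0).is_nullable());
    }
}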
@@ -238,7 +234,6 @@ impl ExecutionPlan for IcebergWriteExec {
                 writer.write(batch?).await.map_err(to_datafusion_error)?;
             }
 
-            let count = writer.current_row_num() as u64;
             let data_file_builders = writer.close().await.map_err(to_datafusion_error)?;
 
             // Convert builders to data files and then to JSON strings
@@ -255,7 +250,7 @@ impl ExecutionPlan for IcebergWriteExec {
                 })
                 .collect::<DFResult<Vec<String>>>()?;
 
-            Self::make_result_batch(count, data_files)
+            Self::make_result_batch(data_files)
         })
         .boxed();
 
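// --- Example (not part of the patch) ---
// With the count column gone, a consumer reads only the serialized data files
// from the result batch; per-file row counts can still be recovered from each
// deserialized DataFile's record_count. This helper is a hedged sketch: the
// function name is made up, and `batch` stands for any RecordBatch produced
// by IcebergWriteExec.
use datafusion::arrow::array::{RecordBatch, StringArray};

fn collect_data_file_json(batch: &RecordBatch) -> Vec<String> {
    let files = batch
        .column_by_name("data_files")
        .expect("result batch has a data_files column")
        .as_any()
        .downcast_ref::<StringArray>()
        .expect("data_files is a Utf8 column");
    // The column is declared non-nullable, so flatten() drops nothing here;
    // it just keeps the sketch total.
    files.iter().flatten().map(String::from).collect()
}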