@@ -27,11 +27,11 @@ use datafusion::arrow::datatypes::{
 use datafusion::common::Result as DFResult;
 use datafusion::error::DataFusionError;
 use datafusion::execution::{SendableRecordBatchStream, TaskContext};
-use datafusion::physical_expr::{EquivalenceProperties, Partitioning};
-use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType};
+use datafusion::physical_expr::EquivalenceProperties;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter;
 use datafusion::physical_plan::{
-    DisplayAs, DisplayFormatType, ExecutionPlan, PlanProperties, execute_input_stream,
+    DisplayAs, DisplayFormatType, ExecutionPlan, ExecutionPlanProperties, PlanProperties,
+    execute_input_stream,
 };
 use futures::StreamExt;
 use iceberg::arrow::schema_to_arrow_schema;
@@ -41,10 +41,10 @@ use iceberg::spec::{
 };
 use iceberg::table::Table;
 use iceberg::writer::base_writer::data_file_writer::DataFileWriterBuilder;
+use iceberg::writer::file_writer::ParquetWriterBuilder;
 use iceberg::writer::file_writer::location_generator::{
     DefaultFileNameGenerator, DefaultLocationGenerator,
 };
-use iceberg::writer::file_writer::ParquetWriterBuilder;
 use iceberg::writer::{CurrentFileStatus, IcebergWriter, IcebergWriterBuilder};
 use iceberg::{Error, ErrorKind};
 use parquet::file::properties::WriterProperties;
@@ -61,7 +61,7 @@ pub(crate) struct IcebergWriteExec {
 
 impl IcebergWriteExec {
     pub fn new(table: Table, input: Arc<dyn ExecutionPlan>, schema: ArrowSchemaRef) -> Self {
-        let plan_properties = Self::compute_properties(schema.clone());
+        let plan_properties = Self::compute_properties(&input, schema.clone());
 
         Self {
             table,
@@ -71,16 +71,15 @@ impl IcebergWriteExec {
         }
     }
 
-    /// todo: Copied from scan.rs
-    fn compute_properties(schema: ArrowSchemaRef) -> PlanProperties {
-        // TODO:
-        // This is more or less a placeholder, to be replaced
-        // once we support output-partitioning
+    fn compute_properties(
+        input: &Arc<dyn ExecutionPlan>,
+        schema: ArrowSchemaRef,
+    ) -> PlanProperties {
         PlanProperties::new(
             EquivalenceProperties::new(schema),
-            Partitioning::UnknownPartitioning(1),
-            EmissionType::Incremental,
-            Boundedness::Bounded,
+            input.output_partitioning().clone(),
+            input.pipeline_behavior(),
+            input.boundedness(),
         )
     }
 
@@ -101,8 +100,8 @@ impl IcebergWriteExec {
     fn make_result_schema() -> ArrowSchemaRef {
         // Define a schema.
         Arc::new(ArrowSchema::new(vec![
-            Field::new("data_files", DataType::Utf8, false),
             Field::new("count", DataType::UInt64, false),
+            Field::new("data_files", DataType::Utf8, false),
         ]))
     }
 }
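
Note on the `compute_properties` change: the write node previously advertised placeholder properties copied from scan.rs (`UnknownPartitioning(1)`, `Incremental`, `Bounded`); it now forwards the input plan's partitioning, emission type, and boundedness via DataFusion's `ExecutionPlanProperties` trait, so the writer reports one output partition per input partition. A minimal standalone sketch of the same pattern, assuming a recent DataFusion version with `pipeline_behavior()`/`boundedness()`; the helper name is illustrative, not from the PR:

```rust
use std::sync::Arc;

use datafusion::arrow::datatypes::SchemaRef;
use datafusion::physical_expr::EquivalenceProperties;
use datafusion::physical_plan::{ExecutionPlan, ExecutionPlanProperties, PlanProperties};

/// Hypothetical helper mirroring the new `compute_properties`: the node's
/// equivalences come from its own output schema, while partitioning,
/// emission type, and boundedness are inherited from the input plan.
fn properties_from_input(
    input: &Arc<dyn ExecutionPlan>,
    output_schema: SchemaRef,
) -> PlanProperties {
    PlanProperties::new(
        EquivalenceProperties::new(output_schema),
        // DataFusion will execute one write stream per input partition.
        input.output_partitioning().clone(),
        input.pipeline_behavior(),
        input.boundedness(),
    )
}
```

Inheriting these properties also means an unbounded or streaming input is no longer mislabeled as `Bounded`, which keeps the optimizer's pipeline analysis honest for plans containing this node.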