@@ -331,10 +331,10 @@ def upgrade_table_version(self, format_version: TableVersion) -> Transaction:
         if format_version not in {1, 2}:
             raise ValueError(f"Unsupported table format version: {format_version}")
 
-        if format_version < self._table.metadata.format_version:
-            raise ValueError(f"Cannot downgrade v{self._table.metadata.format_version} table to v{format_version}")
+        if format_version < self.table_metadata.format_version:
+            raise ValueError(f"Cannot downgrade v{self.table_metadata.format_version} table to v{format_version}")
 
-        if format_version > self._table.metadata.format_version:
+        if format_version > self.table_metadata.format_version:
             return self._apply((UpgradeFormatVersionUpdate(format_version=format_version),))
 
         return self
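
A minimal caller-side sketch of how this path is exercised; the catalog and table names are illustrative assumptions, not part of this change:

from pyiceberg.catalog import load_catalog

catalog = load_catalog("default")        # assumed catalog name
table = catalog.load_table("db.events")  # assumed table identifier
with table.transaction() as txn:
    txn.upgrade_table_version(format_version=2)  # no-op if the table is already v2
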
@@ -452,7 +452,7 @@ def update_schema(self, allow_incompatible_changes: bool = False, case_sensitive
             self,
             allow_incompatible_changes=allow_incompatible_changes,
             case_sensitive=case_sensitive,
-            name_mapping=self._table.name_mapping(),
+            name_mapping=self.table_metadata.name_mapping(),
         )
 
     def update_snapshot(self, snapshot_properties: Dict[str, str] = EMPTY_DICT) -> UpdateSnapshot:
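
The name mapping now comes from the transaction's own metadata rather than the backing Table. A minimal usage sketch, assuming an open transaction txn; the column name is purely illustrative:

from pyiceberg.types import StringType

with txn.update_schema() as update:
    update.add_column("region", StringType())  # hypothetical column, for illustration only
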
@@ -489,7 +489,7 @@ def append(self, df: pa.Table, snapshot_properties: Dict[str, str] = EMPTY_DICT)
             )
         downcast_ns_timestamp_to_us = Config().get_bool(DOWNCAST_NS_TIMESTAMP_TO_US_ON_WRITE) or False
         _check_pyarrow_schema_compatible(
-            self._table.schema(), provided_schema=df.schema, downcast_ns_timestamp_to_us=downcast_ns_timestamp_to_us
+            self.table_metadata.schema(), provided_schema=df.schema, downcast_ns_timestamp_to_us=downcast_ns_timestamp_to_us
         )
 
         manifest_merge_enabled = PropertyUtil.property_as_bool(
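
For context: the DOWNCAST_NS_TIMESTAMP_TO_US_ON_WRITE flag governs whether nanosecond-precision Arrow timestamps may be accepted and written at microsecond precision. In plain pyarrow terms the conversion the flag opts into looks roughly like this (values illustrative):

import pyarrow as pa

ns = pa.array([1_700_000_000_000_000_000], type=pa.timestamp("ns"))
us = ns.cast(pa.timestamp("us"))  # ns -> us downcast, the precision Iceberg stores
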
@@ -504,7 +504,7 @@ def append(self, df: pa.Table, snapshot_properties: Dict[str, str] = EMPTY_DICT)
             # skip writing data files if the dataframe is empty
             if df.shape[0] > 0:
                 data_files = _dataframe_to_data_files(
-                    table_metadata=self._table.metadata, write_uuid=append_files.commit_uuid, df=df, io=self._table.io
+                    table_metadata=self.table_metadata, write_uuid=append_files.commit_uuid, df=df, io=self._table.io
                 )
                 for data_file in data_files:
                     append_files.append_data_file(data_file)
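
Caller-side sketch of the append path, assuming txn is an open transaction on a table whose schema matches the frame (data values are illustrative):

import pyarrow as pa

df = pa.table({"id": pa.array([1, 2], type=pa.int64())})
txn.append(df)  # per the hunk above, data files are only written when df is non-empty
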
@@ -548,7 +548,7 @@ def overwrite(
             )
         downcast_ns_timestamp_to_us = Config().get_bool(DOWNCAST_NS_TIMESTAMP_TO_US_ON_WRITE) or False
         _check_pyarrow_schema_compatible(
-            self._table.schema(), provided_schema=df.schema, downcast_ns_timestamp_to_us=downcast_ns_timestamp_to_us
+            self.table_metadata.schema(), provided_schema=df.schema, downcast_ns_timestamp_to_us=downcast_ns_timestamp_to_us
         )
 
         self.delete(delete_filter=overwrite_filter, snapshot_properties=snapshot_properties)
@@ -557,7 +557,7 @@ def overwrite(
             # skip writing data files if the dataframe is empty
             if df.shape[0] > 0:
                 data_files = _dataframe_to_data_files(
-                    table_metadata=self._table.metadata, write_uuid=update_snapshot.commit_uuid, df=df, io=self._table.io
+                    table_metadata=self.table_metadata, write_uuid=update_snapshot.commit_uuid, df=df, io=self._table.io
                 )
                 for data_file in data_files:
                     update_snapshot.append_data_file(data_file)
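
Usage sketch for the overwrite path, assuming txn and df as in the append sketch above; the filter string is illustrative:

txn.overwrite(df, overwrite_filter="id = 1")  # deletes matching rows, then appends df
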
@@ -595,7 +595,7 @@ def delete(self, delete_filter: Union[str, BooleanExpression], snapshot_properti
 
         # Check if there are any files that require an actual rewrite of a data file
         if delete_snapshot.rewrites_needed is True:
-            bound_delete_filter = bind(self._table.schema(), delete_filter, case_sensitive=True)
+            bound_delete_filter = bind(self.table_metadata.schema(), delete_filter, case_sensitive=True)
             preserve_row_filter = _expression_to_complementary_pyarrow(bound_delete_filter)
 
             files = self._scan(row_filter=delete_filter).plan_files()
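
For intuition: the rewrite keeps the rows matching the complement of the bound delete filter. A standalone pyarrow sketch of the same idea (not the internal helper itself; data is illustrative):

import pyarrow as pa
import pyarrow.compute as pc

t = pa.table({"id": [1, 2, 3]})
delete_mask = pc.equal(t["id"], 1)            # rows the delete filter selects
preserved = t.filter(pc.invert(delete_mask))  # rows a rewritten file keeps
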
@@ -614,7 +614,7 @@ def delete(self, delete_filter: Union[str, BooleanExpression], snapshot_properti
             for original_file in files:
                 df = project_table(
                     tasks=[original_file],
-                    table_metadata=self._table.metadata,
+                    table_metadata=self.table_metadata,
                     io=self._table.io,
                     row_filter=AlwaysTrue(),
                     projected_schema=self.table_metadata.schema(),
@@ -629,7 +629,7 @@ def delete(self, delete_filter: Union[str, BooleanExpression], snapshot_properti
                         _dataframe_to_data_files(
                             io=self._table.io,
                             df=filtered_df,
-                            table_metadata=self._table.metadata,
+                            table_metadata=self.table_metadata,
                             write_uuid=commit_uuid,
                             counter=counter,
                         )
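
Caller-side sketch of a delete that exercises this rewrite path, assuming txn as above; the predicate is illustrative and may be a string or a BooleanExpression:

from pyiceberg.expressions import EqualTo

txn.delete(delete_filter=EqualTo("id", 1))  # equivalent string form: "id = 1"
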
@@ -658,11 +658,13 @@ def add_files(self, file_paths: List[str], snapshot_properties: Dict[str, str] =
         Raises:
             FileNotFoundError: If the file does not exist.
         """
-        if self._table.name_mapping() is None:
-            self.set_properties(**{TableProperties.DEFAULT_NAME_MAPPING: self._table.schema().name_mapping.model_dump_json()})
+        if self.table_metadata.name_mapping() is None:
+            self.set_properties(**{
+                TableProperties.DEFAULT_NAME_MAPPING: self.table_metadata.schema().name_mapping.model_dump_json()
+            })
         with self.update_snapshot(snapshot_properties=snapshot_properties).fast_append() as update_snapshot:
             data_files = _parquet_files_to_data_files(
-                table_metadata=self._table.metadata, file_paths=file_paths, io=self._table.io
+                table_metadata=self.table_metadata, file_paths=file_paths, io=self._table.io
             )
             for data_file in data_files:
                 update_snapshot.append_data_file(data_file)
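
Usage sketch for add_files, assuming txn as above; the Parquet path is an illustrative placeholder:

txn.add_files(file_paths=["s3://bucket/events/part-000.parquet"])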