
Metadata Log Entries metadata table #667


Merged · 20 commits · Jun 26, 2024
Changes from 8 commits
23 changes: 23 additions & 0 deletions mkdocs/docs/api.md
@@ -606,6 +606,29 @@
min_snapshots_to_keep: [[null,10]]
max_snapshot_age_in_ms: [[null,604800000]]
```

### Metadata Log Entries

To show table metadata log entries:

```python
table.inspect.metadata_log_entries()
```

```
pyarrow.Table
timestamp: timestamp[ms]
file: string
latest_snapshot_id: int64 not null
latest_schema_id: int32 not null
latest_sequence_number: int64 not null
----
timestamp: [[2024-04-28 17:03:00.214,2024-04-28 17:03:00.352,2024-04-28 17:03:00.445,2024-04-28 17:03:00.498]]
file: [["s3://warehouse/default/table_metadata_log_entries/metadata/00000-0b3b643b-0f3a-4787-83ad-601ba57b7319.metadata.json","s3://warehouse/default/table_metadata_log_entries/metadata/00001-f74e4b2c-0f89-4f55-822d-23d099fd7d54.metadata.json","s3://warehouse/default/table_metadata_log_entries/metadata/00002-97e31507-e4d9-4438-aff1-3c0c5304d271.metadata.json","s3://warehouse/default/table_metadata_log_entries/metadata/00003-6c8b7033-6ad8-4fe4-b64d-d70381aeaddc.metadata.json"]]
latest_snapshot_id: [[null,3958871664825505738,1289234307021405706,7640277914614648349]]
latest_schema_id: [[null,0,0,0]]
latest_sequence_number: [[null,0,0,0]]
```
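The result is a regular `pyarrow.Table`, so it converts directly to pandas if that is more convenient (a one-line sketch, not part of the original diff, assuming pandas is installed alongside pyarrow):

```python
# Optional: view the metadata log as a pandas DataFrame.
df = table.inspect.metadata_log_entries().to_pandas()
```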

## Add Files

Expert Iceberg users may choose to commit existing parquet files to the Iceberg table as data files, without rewriting them.
33 changes: 33 additions & 0 deletions pyiceberg/table/__init__.py
@@ -3537,6 +3537,39 @@ def update_partitions_map(
            schema=table_schema,
        )

    def metadata_log_entries(self) -> "pa.Table":
        import pyarrow as pa

        from pyiceberg.table.snapshots import MetadataLogEntry

        table_schema = pa.schema([
            pa.field("timestamp", pa.timestamp(unit='ms'), nullable=False),
            pa.field("file", pa.string(), nullable=False),
            pa.field("latest_snapshot_id", pa.int64(), nullable=True),
            pa.field("latest_schema_id", pa.int32(), nullable=True),
            pa.field("latest_sequence_number", pa.int64(), nullable=True),
        ])

        def metadata_log_entry_to_row(metadata_entry: MetadataLogEntry) -> Dict[str, Any]:
            latest_snapshot = self.tbl.metadata._snapshot_as_of_timestamp_ms(metadata_entry.timestamp_ms)
            return {
                "timestamp": metadata_entry.timestamp_ms,
                "file": metadata_entry.metadata_file,
                "latest_snapshot_id": latest_snapshot.snapshot_id if latest_snapshot else None,
                "latest_schema_id": latest_snapshot.schema_id if latest_snapshot else None,
                "latest_sequence_number": latest_snapshot.sequence_number if latest_snapshot else None,
            }

        # imitates `addPreviousFile` from Java; this could potentially be moved into the `metadata_log` constructor
        metadata_log_entries = self.tbl.metadata.metadata_log + [
            MetadataLogEntry(metadata_file=self.tbl.metadata_location, timestamp_ms=self.tbl.metadata.last_updated_ms)
        ]

        return pa.Table.from_pylist(
            [metadata_log_entry_to_row(entry) for entry in metadata_log_entries],
            schema=table_schema,
        )


@dataclass(frozen=True)
class TablePartition:
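As a quick illustration of how the new inspect method might be called (not part of the diff; the catalog and table names below are hypothetical):

```python
# Illustrative sketch only; the "default" catalog and table name are hypothetical.
from pyiceberg.catalog import load_catalog

catalog = load_catalog("default")
table = catalog.load_table("default.my_table")

# The method returns a pyarrow.Table, so plain pyarrow operations apply,
# e.g. sorting by timestamp to find the most recent metadata file.
entries = table.inspect.metadata_log_entries()
newest = entries.sort_by([("timestamp", "descending")]).slice(0, 1)
print(newest.to_pylist())
```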
7 changes: 7 additions & 0 deletions pyiceberg/table/metadata.py
@@ -292,6 +292,13 @@ def snapshot_by_name(self, name: str) -> Optional[Snapshot]:
            return self.snapshot_by_id(ref.snapshot_id)
        return None

    def _snapshot_as_of_timestamp_ms(self, timestamp_ms: int) -> Optional[Snapshot]:
        """Return the snapshot that was current at the given timestamp, or None if no such snapshot exists."""
        for entry in reversed(self.snapshot_log):
            if entry.timestamp_ms <= timestamp_ms:
                return self.snapshot_by_id(entry.snapshot_id)
        return None

    def current_snapshot(self) -> Optional[Snapshot]:
        """Get the current snapshot for this table, or None if there is no current snapshot."""
        if self.current_snapshot_id is not None:
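To make the lookup semantics concrete, here is a self-contained toy sketch of the same reverse scan (the `LogEntry` class below is an illustrative stand-in, not pyiceberg's snapshot-log entry type):

```python
# Toy illustration of the reverse-scan lookup; not pyiceberg code.
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class LogEntry:
    snapshot_id: int
    timestamp_ms: int


def snapshot_as_of(log: List[LogEntry], timestamp_ms: int) -> Optional[int]:
    # Scan newest-to-oldest; the first entry at or before the timestamp
    # identifies the snapshot that was current at that time.
    for entry in reversed(log):
        if entry.timestamp_ms <= timestamp_ms:
            return entry.snapshot_id
    return None  # the table had no snapshot yet at that timestamp


log = [LogEntry(1, 100), LogEntry(2, 200), LogEntry(3, 300)]
assert snapshot_as_of(log, 250) == 2  # snapshot 2 was current at t=250
assert snapshot_as_of(log, 50) is None  # before the first snapshot
```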
3 changes: 2 additions & 1 deletion pyiceberg/table/snapshots.py
@@ -226,7 +226,8 @@ def __eq__(self, other: Any) -> bool:
class Snapshot(IcebergBaseModel):
    snapshot_id: int = Field(alias="snapshot-id")
    parent_snapshot_id: Optional[int] = Field(alias="parent-snapshot-id", default=None)
-   sequence_number: Optional[int] = Field(alias="sequence-number", default=None)
+   # cannot import `INITIAL_SEQUENCE_NUMBER` due to circular import
+   sequence_number: Optional[int] = Field(alias="sequence-number", default=0)
Collaborator:

Is there a reason the default value for the sequence number has to be changed to 0 as opposed to None?

Contributor Author:

According to the spec (https://iceberg.apache.org/spec/#version-2):

> Snapshot JSON:
> sequence-number was added and is required; default to 0 when reading v1 metadata

I also added this to the PR description.

Contributor:

@kevinjqliu Thanks for spotting this! We definitely need to read snapshot.sequence_number as 0 for v1. However, as we have observed in the test outcome, making sequence_number default to 0 here leads to sequence_number=0 being written to version 1 table metadata's snapshots, which is not allowed by the spec:

> Writing v1 metadata:
> Snapshot field sequence-number should not be written

I think we may need a new field_serializer in the TableMetadataCommonFields class, or some other way to correct the behavior on write. WDYT?

Contributor Author:

Thank you! I missed the part about the V1 spec. Following your suggestion, I added a field_serializer for TableMetadataCommonFields. This ensures that the Snapshot pydantic object will not include the sequence-number field for the V1 format.
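For context on the fix being discussed, here is a minimal, hypothetical sketch of dropping `sequence-number` on v1 writes with a pydantic v2 `field_serializer`; the simplified models below are stand-ins, not the PR's actual code:

```python
# Hypothetical sketch, not the PR's implementation: strip `sequence-number`
# from serialized snapshots when the table's format version is 1.
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field, field_serializer


class Snapshot(BaseModel):
    snapshot_id: int = Field(alias="snapshot-id")
    # Read v1 metadata with a default of 0, per the v2 spec's reading rules.
    sequence_number: Optional[int] = Field(alias="sequence-number", default=0)


class TableMetadata(BaseModel):
    format_version: int = Field(alias="format-version")
    snapshots: List[Snapshot] = Field(default_factory=list)

    @field_serializer("snapshots")
    def serialize_snapshots(self, snapshots: List[Snapshot]) -> List[Dict[str, Any]]:
        dumped = [s.model_dump(by_alias=True) for s in snapshots]
        if self.format_version == 1:
            # v1 spec: the sequence-number field should not be written.
            for snapshot in dumped:
                snapshot.pop("sequence-number", None)
        return dumped


meta = TableMetadata.model_validate({"format-version": 1, "snapshots": [{"snapshot-id": 1}]})
print(meta.model_dump(by_alias=True))  # snapshots carry no "sequence-number" key
```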

    timestamp_ms: int = Field(alias="timestamp-ms", default_factory=lambda: int(time.time() * 1000))
    manifest_list: Optional[str] = Field(
        alias="manifest-list", description="Location of the snapshot's manifest list file", default=None
34 changes: 34 additions & 0 deletions tests/integration/test_inspect_table.py
@@ -445,3 +445,37 @@ def check_pyiceberg_df_equals_spark_df(df: pa.Table, spark_df: DataFrame) -> None:
        df = tbl.inspect.partitions(snapshot_id=snapshot.snapshot_id)
        spark_df = spark.sql(f"SELECT * FROM {identifier}.partitions VERSION AS OF {snapshot.snapshot_id}")
        check_pyiceberg_df_equals_spark_df(df, spark_df)


@pytest.mark.integration
@pytest.mark.parametrize("format_version", [1, 2])
def test_inspect_metadata_log_entries(
    spark: SparkSession, session_catalog: Catalog, arrow_table_with_null: pa.Table, format_version: int
) -> None:
    from pandas.testing import assert_frame_equal

    identifier = "default.table_metadata_log_entries"
    tbl = _create_table(session_catalog, identifier, properties={"format-version": format_version})

    # Write some data
    tbl.append(arrow_table_with_null)
    tbl.append(arrow_table_with_null)
    tbl.append(arrow_table_with_null)

    df = tbl.inspect.metadata_log_entries()
    spark_df = spark.sql(f"SELECT * FROM {identifier}.metadata_log_entries")
    lhs = df.to_pandas()
    rhs = spark_df.toPandas()

    # The timestamp in the last row of the `metadata_log_entries` table is based on when
    # the table was read, so the last-row timestamps of the pyiceberg and Spark dataframes differ.
    left_before_last, left_last = lhs[:-1], lhs[-1:]
    right_before_last, right_last = rhs[:-1], rhs[-1:]

    assert_frame_equal(left_before_last, right_before_last, check_dtype=False)
    # Compare the last row, except for the timestamp.
    for column in df.column_names:
        for left, right in zip(left_last[column], right_last[column]):
            if column == 'timestamp':
                continue
            assert left == right, f"Difference in column {column}: {left} != {right}"