|
36 | 36 | from pyiceberg.catalog.hive import HiveCatalog
|
37 | 37 | from pyiceberg.catalog.sql import SqlCatalog
|
38 | 38 | from pyiceberg.exceptions import NoSuchTableError
|
| 39 | +from pyiceberg.partitioning import PartitionField, PartitionSpec |
| 40 | +from pyiceberg.schema import Schema |
39 | 41 | from pyiceberg.table import TableProperties, _dataframe_to_data_files
|
| 42 | +from pyiceberg.transforms import IdentityTransform |
| 43 | +from pyiceberg.types import IntegerType, NestedField |
40 | 44 | from tests.conftest import TEST_DATA_WITH_NULL
|
41 | 45 | from utils import _create_table
|
42 | 46 |
|
@@ -807,3 +811,25 @@ def test_hive_catalog_storage_descriptor(
|
807 | 811 | assert len(tbl.scan().to_arrow()) == 3
|
808 | 812 | # check if spark can read the table
|
809 | 813 | assert spark.sql("SELECT * FROM hive.default.test_storage_descriptor").count() == 3
|
| 814 | + |
| 815 | + |
@pytest.mark.integration
@pytest.mark.parametrize('catalog', [pytest.lazy_fixture('session_catalog_hive'), pytest.lazy_fixture('session_catalog')])
def test_sanitize_character_partitioned(catalog: Catalog) -> None:
    """Round-trip 22 rows through a table partitioned on a field whose name contains '.'.

    The '.' in both the column name and the partition-field name must be
    sanitized by the write path; the scan at the end confirms all rows are
    readable afterwards.
    """
    identifier = "default.test_table_partitioned_sanitized_character"

    # Start from a clean slate — the table may linger from an earlier run.
    try:
        catalog.drop_table(identifier)
    except NoSuchTableError:
        pass

    # One required int column named with a special character.
    iceberg_schema = Schema(NestedField(field_id=1, name="some.id", type=IntegerType(), required=True))
    # Identity-partition on that same column; the spec field name also carries the '.'.
    spec = PartitionSpec(
        PartitionField(source_id=1, field_id=1000, name="some.id_identity", transform=IdentityTransform())
    )
    arrow_schema = pa.schema([pa.field("some.id", pa.int32(), nullable=False)])
    payload = pa.Table.from_arrays([range(22)], schema=arrow_schema)

    tbl = _create_table(
        session_catalog=catalog,
        identifier=identifier,
        schema=iceberg_schema,
        partition_spec=spec,
        data=[payload],
    )

    # Every written row must come back from a full scan.
    assert len(tbl.scan().to_arrow()) == 22
0 commit comments