Skip to content

Commit 1271e84

Browse files
authored
HIVE-29205: Addendum: Iceberg: Upgrade iceberg version to 1.10.0 (#6235)
1 parent da55861 commit 1271e84

File tree

12 files changed

+66
-930
lines changed

12 files changed

+66
-930
lines changed

iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/HiveTableTest.java

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -38,18 +38,17 @@
3838
import org.apache.hadoop.hive.serde.serdeConstants;
3939
import org.apache.hive.iceberg.org.apache.avro.generic.GenericData;
4040
import org.apache.hive.iceberg.org.apache.avro.generic.GenericRecordBuilder;
41-
import org.apache.iceberg.BaseTable;
4241
import org.apache.iceberg.DataFile;
4342
import org.apache.iceberg.DataFiles;
4443
import org.apache.iceberg.FileScanTask;
4544
import org.apache.iceberg.Files;
46-
import org.apache.iceberg.HasTableOperations;
4745
import org.apache.iceberg.ManifestFile;
4846
import org.apache.iceberg.PartitionSpec;
4947
import org.apache.iceberg.Schema;
5048
import org.apache.iceberg.Table;
5149
import org.apache.iceberg.TableMetadataParser;
5250
import org.apache.iceberg.TableProperties;
51+
import org.apache.iceberg.TableUtil;
5352
import org.apache.iceberg.avro.Avro;
5453
import org.apache.iceberg.avro.AvroSchemaUtil;
5554
import org.apache.iceberg.catalog.Namespace;
@@ -259,7 +258,7 @@ public void testDropTable() throws IOException {
259258
.as("Table manifest files should not exist")
260259
.doesNotExist();
261260
}
262-
assertThat(new File(((HasTableOperations) table).operations().current().metadataFileLocation()
261+
assertThat(new File(TableUtil.metadataFileLocation(table)
263262
.replace("file:", "")))
264263
.as("Table metadata file should not exist")
265264
.doesNotExist();
@@ -552,7 +551,7 @@ public void testRegisterHadoopTableToHiveCatalog() throws IOException, TExceptio
552551
.hasMessage("Table does not exist: hivedb.table1");
553552

554553
// register the table to hive catalog using the latest metadata file
555-
String latestMetadataFile = ((BaseTable) table).operations().current().metadataFileLocation();
554+
String latestMetadataFile = TableUtil.metadataFileLocation(table);
556555
catalog.registerTable(identifier, "file:" + latestMetadataFile);
557556
assertThat(HIVE_METASTORE_EXTENSION.metastoreClient().getTable(DB_NAME, "table1")).isNotNull();
558557

iceberg/iceberg-handler/src/main/java/org/apache/iceberg/data/PartitionStatsHandler.java

Lines changed: 0 additions & 285 deletions
This file was deleted.

iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/BaseHiveIcebergMetaHook.java

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,6 @@
4646
import org.apache.hadoop.hive.ql.ddl.misc.sortoder.ZOrderFields;
4747
import org.apache.hadoop.hive.ql.util.NullOrdering;
4848
import org.apache.iceberg.BaseMetastoreTableOperations;
49-
import org.apache.iceberg.BaseTable;
5049
import org.apache.iceberg.NullOrder;
5150
import org.apache.iceberg.PartitionSpec;
5251
import org.apache.iceberg.PartitionSpecParser;
@@ -57,6 +56,7 @@
5756
import org.apache.iceberg.SortOrderParser;
5857
import org.apache.iceberg.Table;
5958
import org.apache.iceberg.TableProperties;
59+
import org.apache.iceberg.TableUtil;
6060
import org.apache.iceberg.catalog.TableIdentifier;
6161
import org.apache.iceberg.exceptions.NoSuchTableException;
6262
import org.apache.iceberg.exceptions.NotFoundException;
@@ -441,7 +441,7 @@ static boolean isOrcFileFormat(org.apache.hadoop.hive.metastore.api.Table hmsTab
441441
}
442442

443443
protected void setWriteModeDefaults(Table icebergTbl, Map<String, String> newProps, EnvironmentContext context) {
444-
if ((icebergTbl == null || ((BaseTable) icebergTbl).operations().current().formatVersion() == 1) &&
444+
if ((icebergTbl == null || TableUtil.formatVersion(icebergTbl) == 1) &&
445445
IcebergTableUtil.isV2TableOrAbove(newProps)) {
446446
List<String> writeModeList = ImmutableList.of(
447447
TableProperties.DELETE_MODE, TableProperties.UPDATE_MODE, TableProperties.MERGE_MODE);
@@ -473,7 +473,7 @@ public void postGetTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) {
473473
if (hmsTable != null) {
474474
try {
475475
Table tbl = IcebergTableUtil.getTable(conf, hmsTable);
476-
String formatVersion = String.valueOf(((BaseTable) tbl).operations().current().formatVersion());
476+
String formatVersion = String.valueOf(TableUtil.formatVersion(tbl));
477477
hmsTable.getParameters().put(TableProperties.FORMAT_VERSION, formatVersion);
478478
// Set the serde info
479479
hmsTable.getSd().setInputFormat(HiveIcebergInputFormat.class.getName());

iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,7 @@
9696
import org.apache.iceberg.TableMetadata;
9797
import org.apache.iceberg.TableMetadataParser;
9898
import org.apache.iceberg.TableProperties;
99+
import org.apache.iceberg.TableUtil;
99100
import org.apache.iceberg.Transaction;
100101
import org.apache.iceberg.UpdatePartitionSpec;
101102
import org.apache.iceberg.UpdateProperties;
@@ -552,7 +553,7 @@ public void rollbackAlterTable(org.apache.hadoop.hive.metastore.api.Table hmsTab
552553
}
553554

554555
// we want to keep the data files but get rid of the metadata directory
555-
String metadataLocation = ((BaseTable) this.icebergTable).operations().current().metadataFileLocation();
556+
String metadataLocation = TableUtil.metadataFileLocation(this.icebergTable);
556557
try {
557558
Path path = new Path(metadataLocation).getParent();
558559
FileSystem.get(path.toUri(), conf).delete(path, true);

iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputFormat.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ private static HiveIcebergWriter writer(JobConf jc) {
7272
}
7373

7474
private static void setWriterLevelConfiguration(JobConf jc, Table table) {
75-
final String writeFormat = table.properties().get("write.format.default");
75+
final String writeFormat = table.properties().get(TableProperties.DEFAULT_FILE_FORMAT);
7676
if (writeFormat == null || "PARQUET".equalsIgnoreCase(writeFormat)) {
7777
if (table.properties().get(TableProperties.PARQUET_ROW_GROUP_SIZE_BYTES) == null &&
7878
jc.get(ParquetOutputFormat.BLOCK_SIZE) != null) {

0 commit comments

Comments (0)