Skip to content

Commit 56ea49a

Browse files
author
ragupta
committed
Separated redundant code and made the implementation generic for non-native tables
1 parent a32a9b8 commit 56ea49a

File tree

6 files changed

+21
-53
lines changed

6 files changed

+21
-53
lines changed

iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2075,6 +2075,13 @@ public boolean isPartitioned(org.apache.hadoop.hive.ql.metadata.Table hmsTable)
20752075
return table.spec().isPartitioned();
20762076
}
20772077

2078+
@Override
2079+
public boolean isPartitionPresent(org.apache.hadoop.hive.ql.metadata.Table table,
2080+
Map<String, String> partitionSpec) throws SemanticException {
2081+
return getPartitionKeys(table).size() == partitionSpec.size() &&
2082+
!getPartitions(table, partitionSpec, false).isEmpty();
2083+
}
2084+
20782085
@Override
20792086
public Partition getPartition(org.apache.hadoop.hive.ql.metadata.Table table,
20802087
Map<String, String> partitionSpec, RewritePolicy policy) throws SemanticException {

ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableAnalyzer.java

Lines changed: 5 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,9 @@
1818

1919
package org.apache.hadoop.hive.ql.ddl.table.info.desc;
2020

21-
import java.util.List;
2221
import java.util.Map;
2322

2423
import org.apache.hadoop.hive.common.TableName;
25-
import org.apache.hadoop.hive.metastore.api.FieldSchema;
2624
import org.apache.hadoop.hive.ql.ErrorMsg;
2725
import org.apache.hadoop.hive.ql.QueryState;
2826
import org.apache.hadoop.hive.ql.ddl.DDLWork;
@@ -162,8 +160,6 @@ private Map<String, String> getPartitionSpec(Hive db, ASTNode node, TableName ta
162160

163161
Map<String, String> partitionSpec = null;
164162
try {
165-
partitionSpec = getPartSpec(partNode);
166-
validateUnsupportedPartitionClause(tab, partitionSpec != null && !partitionSpec.isEmpty());
167163
partitionSpec = getValidatedPartSpec(tab, partNode, db.getConf(), false);
168164
} catch (SemanticException e) {
169165
// get exception in resolving partition it could be DESCRIBE table key
@@ -172,16 +168,18 @@ private Map<String, String> getPartitionSpec(Hive db, ASTNode node, TableName ta
172168
}
173169

174170
if (partitionSpec != null) {
175-
Partition part = null;
171+
boolean isPartitionPresent;
176172
try {
177-
part = getPartition(tab, partitionSpec);
173+
isPartitionPresent = tab.isNonNative() ?
174+
tab.getStorageHandler().isPartitionPresent(tab, partitionSpec) :
175+
db.getPartition(tab, partitionSpec) != null;
178176
} catch (HiveException e) {
179177
// if get exception in finding partition it could be DESCRIBE table key
180178
// return null, continue processing for DESCRIBE table key
181179
return null;
182180
}
183181

184-
if (part == null) {
182+
if (!isPartitionPresent) {
185183
throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(partitionSpec.toString()));
186184
}
187185

@@ -190,20 +188,4 @@ private Map<String, String> getPartitionSpec(Hive db, ASTNode node, TableName ta
190188
}
191189
return null;
192190
}
193-
194-
private Partition getPartition(Table tab, Map<String, String> partitionSpec) throws HiveException {
195-
boolean isIcebergTable = DDLUtils.isIcebergTable(tab);
196-
if (isIcebergTable) {
197-
List<FieldSchema> partKeys = tab.getStorageHandler().getPartitionKeys(tab);
198-
if (partKeys.size() != partitionSpec.size()) {
199-
return null;
200-
}
201-
List<Partition> partList = tab.getStorageHandler().getPartitions(tab, partitionSpec, false);
202-
if (partList.isEmpty()) {
203-
return null;
204-
}
205-
return partList.getFirst();
206-
}
207-
return db.getPartition(tab, partitionSpec, false);
208-
}
209191
}

ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java

Lines changed: 1 addition & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,6 @@
4040
import org.apache.hadoop.hive.metastore.api.MetaException;
4141
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
4242
import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
43-
import org.apache.hadoop.hive.ql.ddl.DDLUtils;
4443
import org.apache.hadoop.hive.ql.ddl.ShowUtils;
4544
import org.apache.hadoop.hive.ql.ddl.table.info.desc.formatter.DescTableFormatter;
4645
import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -128,14 +127,7 @@ private Table getTable() throws HiveException {
128127
private Partition getPartition(Table table) throws HiveException {
129128
Partition part = null;
130129
if (desc.getPartitionSpec() != null) {
131-
boolean isIcebergTable = DDLUtils.isIcebergTable(table);
132-
if (isIcebergTable) {
133-
List<Partition> partList = table.getStorageHandler().getPartitions(table, desc.getPartitionSpec(), false);
134-
part = (partList.isEmpty()) ? null : partList.getFirst();
135-
136-
} else {
137-
part = context.getDb().getPartition(table, desc.getPartitionSpec(), false);
138-
}
130+
part = context.getDb().getPartition(table, desc.getPartitionSpec());
139131
if (part == null) {
140132
throw new HiveException(ErrorMsg.INVALID_PARTITION,
141133
StringUtils.join(desc.getPartitionSpec().keySet(), ','), desc.getDbTableName());

ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/formatter/TextDescTableFormatter.java

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,6 @@
3535
import org.apache.hadoop.hive.metastore.api.SourceTable;
3636
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
3737
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
38-
import org.apache.hadoop.hive.ql.ddl.DDLUtils;
3938
import org.apache.hadoop.hive.ql.ddl.ShowUtils;
4039
import org.apache.hadoop.hive.ql.ddl.ShowUtils.TextMetaDataTable;
4140
import org.apache.hadoop.hive.ql.ddl.table.info.desc.DescTableDesc;
@@ -172,10 +171,11 @@ private void addPartitionData(DataOutputStream out, HiveConf conf, String column
172171
boolean isFormatted, boolean isOutputPadded) throws IOException {
173172
String partitionData = "";
174173
if (columnPath == null) {
175-
boolean isIcebergTable = DDLUtils.isIcebergTable(table);
176174
List<FieldSchema> partitionColumns = null;
177175
if (table.isPartitioned()) {
178-
partitionColumns = isIcebergTable ? table.getStorageHandler().getPartitionKeys(table) : table.getPartCols();
176+
partitionColumns = table.isNonNative() ?
177+
table.getStorageHandler().getPartitionKeys(table) :
178+
table.getPartCols();
179179
}
180180
if (CollectionUtils.isNotEmpty(partitionColumns) &&
181181
conf.getBoolVar(ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY)) {

ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -987,6 +987,11 @@ default boolean isPartitioned(org.apache.hadoop.hive.ql.metadata.Table table) {
987987
return false;
988988
}
989989

990+
default boolean isPartitionPresent(org.apache.hadoop.hive.ql.metadata.Table table,
991+
Map<String, String> partitionSpec) throws SemanticException {
992+
return false;
993+
}
994+
990995
default boolean hasUndergonePartitionEvolution(org.apache.hadoop.hive.ql.metadata.Table table) {
991996
throw new UnsupportedOperationException("Storage handler does not support checking if table " +
992997
"undergone partition evolution.");

ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,6 @@
6868
import org.apache.hadoop.hive.ql.QueryState;
6969
import org.apache.hadoop.hive.ql.cache.results.CacheUsage;
7070
import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId;
71-
import org.apache.hadoop.hive.ql.ddl.DDLUtils;
7271
import org.apache.hadoop.hive.ql.ddl.table.constraint.ConstraintsUtils;
7372
import org.apache.hadoop.hive.ql.exec.FetchTask;
7473
import org.apache.hadoop.hive.ql.exec.Task;
@@ -1740,23 +1739,6 @@ public static void validatePartSpec(Table tbl, Map<String, String> partSpec,
17401739
}
17411740
}
17421741

1743-
/**
1744-
* Throws an UnsupportedOperationException in case the query has a partition clause but the table is never partitioned
1745-
* on the HMS-level. Even though table is not partitioned from the HMS's point of view, it might have some other
1746-
* notion of partitioning under the hood (e.g. Iceberg tables). In these cases, we might decide to proactively throw a
1747-
* more descriptive, unified error message instead of failing on some other semantic analysis validation step, which
1748-
* could provide a more counter-intuitive exception message.
1749-
*
1750-
* @param tbl The table object, should not be null.
1751-
* @param partitionClausePresent Whether a partition clause is present in the query (e.g. PARTITION(last_name='Don'))
1752-
*/
1753-
protected static void validateUnsupportedPartitionClause(Table tbl, boolean partitionClausePresent) {
1754-
if (partitionClausePresent && tbl.hasNonNativePartitionSupport() && !DDLUtils.isIcebergTable(tbl)) {
1755-
throw new UnsupportedOperationException("Using partition spec in query is unsupported for non-native table" +
1756-
" backed by: " + tbl.getStorageHandler().toString());
1757-
}
1758-
}
1759-
17601742
public static void validatePartColumnType(Table tbl, Map<String, String> partSpec,
17611743
ASTNode astNode, HiveConf conf) throws SemanticException {
17621744
if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_TYPE_CHECK_ON_INSERT)) {

0 commit comments

Comments (0)