From f6c740981a05ea619d6213a1d028cd642b96946e Mon Sep 17 00:00:00 2001 From: Ramit Gupta Date: Wed, 8 Apr 2026 23:50:56 +0530 Subject: [PATCH 1/5] HIVE-29413: Avoid code duplication by updating getPartCols method for iceberg tables --- .../column/show/ShowColumnsOperation.java | 5 +- .../table/info/desc/DescTableOperation.java | 2 +- .../formatter/TextDescTableFormatter.java | 4 +- .../ddl/table/partition/PartitionUtils.java | 1 - .../hive/ql/metadata/DummyPartition.java | 4 +- .../hadoop/hive/ql/metadata/Partition.java | 3 +- .../apache/hadoop/hive/ql/metadata/Table.java | 62 ++++++++++++++----- .../ql/optimizer/ColumnPrunerProcFactory.java | 3 +- .../ql/parse/AcidExportSemanticAnalyzer.java | 3 +- .../parse/ColumnStatsAutoGatherContext.java | 3 +- .../ql/parse/ColumnStatsSemanticAnalyzer.java | 3 +- .../hive/ql/parse/ImportSemanticAnalyzer.java | 2 - .../hive/ql/parse/MergeSemanticAnalyzer.java | 3 +- .../hadoop/hive/ql/parse/ParseUtils.java | 3 +- .../rewrite/CopyOnWriteMergeRewriter.java | 2 +- .../hive/ql/parse/rewrite/MergeRewriter.java | 2 +- .../ql/parse/rewrite/SplitMergeRewriter.java | 2 +- 17 files changed, 65 insertions(+), 42 deletions(-) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/show/ShowColumnsOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/show/ShowColumnsOperation.java index 289479b7ee79..6a2abd4ddfc2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/show/ShowColumnsOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/show/ShowColumnsOperation.java @@ -66,10 +66,7 @@ private List getColumnsByPattern() throws HiveException { private List getCols() throws HiveException { Table table = context.getDb().getTable(desc.getTableName()); - List allColumns = new ArrayList<>(); - allColumns.addAll(table.getCols()); - allColumns.addAll(table.getPartCols()); - return allColumns; + return new ArrayList<>(table.getAllCols()); } private Matcher getMatcher() { diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java index b60126a8db15..b0e038615e09 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java @@ -130,7 +130,7 @@ private Deserializer getDeserializer(Table table) throws SQLException { private void getColumnsNoColumnPath(Table table, Partition partition, List cols) throws HiveException { cols.addAll(partition == null || table.getTableType() == TableType.VIRTUAL_VIEW ? table.getCols() : partition.getCols()); - if (!desc.isFormatted()) { + if (!desc.isFormatted() && !table.hasNonNativePartitionSupport()) { cols.addAll(table.getPartCols()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/formatter/TextDescTableFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/formatter/TextDescTableFormatter.java index b1dd9738572a..30d8dde3bd9b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/formatter/TextDescTableFormatter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/formatter/TextDescTableFormatter.java @@ -174,9 +174,7 @@ private void addPartitionData(DataOutputStream out, HiveConf conf, String column List partitionColumns = null; // TODO (HIVE-29413): Refactor to a generic getPartCols() implementation if (table.isPartitioned()) { - partitionColumns = table.hasNonNativePartitionSupport() ? 
- table.getStorageHandler().getPartitionKeys(table) : - table.getPartCols(); + partitionColumns = table.getPartCols(); } if (CollectionUtils.isNotEmpty(partitionColumns) && conf.getBoolVar(ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java index db7a5dfcd3d0..5882e4616506 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.Map.Entry; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/DummyPartition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/DummyPartition.java index c188eb09fdcf..45087c901e3f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/DummyPartition.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/DummyPartition.java @@ -91,9 +91,7 @@ public List getValues() { values = new ArrayList<>(); // TODO (HIVE-29413): Refactor to a generic getPartCols() implementation - for (FieldSchema fs : table.hasNonNativePartitionSupport() - ? 
table.getStorageHandler().getPartitionKeys(table) - : table.getPartCols()) { + for (FieldSchema fs : table.getPartCols()) { String val = partSpec.get(fs.getName()); values.add(val); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java index 736e6e8c9f1a..330f37d1ef1c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java @@ -173,7 +173,8 @@ protected void initialize(Table table, // set default if location is not set and this is a physical // table partition (not a view partition) if (table.getDataLocation() != null) { - Path partPath = new Path(table.getDataLocation(), Warehouse.makePartName(table.getPartCols(), tPartition.getValues())); + Path partPath = new Path(table.getDataLocation(), + Warehouse.makePartName(table.getPartCols(), tPartition.getValues())); tPartition.getSd().setLocation(partPath.toString()); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index f857e7d505f1..7d0c00d61d45 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.HiveMetaHook; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; @@ -118,6 +119,8 @@ public class Table implements Serializable { private transient StorageHandlerInfo storageHandlerInfo; private transient MaterializedViewMetadata materializedViewMetadata; + private List cachedPartCols; + private TableSpec 
tableSpec; private boolean materializedTable; @@ -193,6 +196,9 @@ public Table makeCopy() { newTab.setMetaTable(this.getMetaTable()); newTab.setSnapshotRef(this.getSnapshotRef()); + if (this.cachedPartCols != null) { + newTab.cachedPartCols = new ArrayList<>(this.cachedPartCols); + } return newTab; } @@ -214,6 +220,7 @@ public org.apache.hadoop.hive.metastore.api.Table getTTable() { */ public void setTTable(org.apache.hadoop.hive.metastore.api.Table tTable) { this.tTable = tTable; + clearCachedPartCols(); } /** @@ -248,7 +255,7 @@ public void setTTable(org.apache.hadoop.hive.metastore.api.Table tTable) { org.apache.hadoop.hive.metastore.api.Table t = new org.apache.hadoop.hive.metastore.api.Table(); { t.setSd(sd); - t.setPartitionKeys(new ArrayList()); + t.setPartitionKeys(null); t.setParameters(new HashMap()); t.setTableType(TableType.MANAGED_TABLE.toString()); t.setDbName(databaseName); @@ -595,14 +602,35 @@ public boolean equals(Object obj) { } public List getPartCols() { - List partKeys = tTable.getPartitionKeys(); - if (partKeys == null) { - partKeys = new ArrayList<>(); - tTable.setPartitionKeys(partKeys); + if (cachedPartCols != null) { + return cachedPartCols; + } + List partKeys; + if (isTableTypeSet() && hasNonNativePartitionSupport()) { + partKeys = getStorageHandler().getPartitionKeys(this); + } else { + partKeys = tTable.getPartitionKeys(); + if (partKeys == null) { + partKeys = new ArrayList<>(); + tTable.setPartitionKeys(partKeys); + } } + cachedPartCols = partKeys; return partKeys; } + private void clearCachedPartCols() { + cachedPartCols = null; + } + + private boolean isTableTypeSet() { + if (tTable.getParameters() == null) { + return false; + } + String tableType = tTable.getParameters().get(HiveMetaHook.TABLE_TYPE); + return tableType != null; + } + public FieldSchema getPartColByName(String colName) { return getPartCols().stream() .filter(key -> key.getName().toLowerCase().equals(colName)) @@ -610,9 +638,7 @@ public FieldSchema 
getPartColByName(String colName) { } public List getPartColNames() { - List partCols = hasNonNativePartitionSupport() ? - getStorageHandler().getPartitionKeys(this) : getPartCols(); - return partCols.stream().map(FieldSchema::getName) + return getPartCols().stream().map(FieldSchema::getName) .collect(Collectors.toList()); } @@ -761,14 +787,22 @@ private List getColsInternal(boolean forMs) { * @return List<FieldSchema> */ public List getAllCols() { - ArrayList f_list = new ArrayList(); - f_list.addAll(getCols()); - f_list.addAll(getPartCols()); - return f_list; + List allCols = new ArrayList<>(getCols()); + Set colNames = new HashSet<>(); + for (FieldSchema col : allCols) { + colNames.add(col.getName()); + } + for (FieldSchema col : getPartCols()) { + if (!colNames.contains(col.getName())) { + allCols.add(col); + } + } + return allCols; } public void setPartCols(List partCols) { tTable.setPartitionKeys(partCols); + clearCachedPartCols(); } public String getCatName() { @@ -812,7 +846,7 @@ public void setOutputFormatClass(String name) throws HiveException { } public boolean isPartitioned() { - return hasNonNativePartitionSupport() ? getStorageHandler().isPartitioned(this) : + return hasNonNativePartitionSupport() ? 
getStorageHandler().isPartitioned(this) : CollectionUtils.isNotEmpty(getPartCols()); } @@ -1153,7 +1187,7 @@ public static void validateColumns(List columns, List } colNames.add(colName); } - if (partCols != null) { + if (partCols != null && !icebergTable) { // there is no overlap between columns and partitioning columns for (FieldSchema partCol: partCols) { String colName = normalize(partCol.getName()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java index 3d3e4ce7663f..0e914843e2e1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java @@ -807,8 +807,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, for (FieldNode col : cols) { int index = originalOutputColumnNames.indexOf(col.getFieldName()); Table tab = cppCtx.getParseContext().getViewProjectToTableSchema().get(op); - List fullFieldList = new ArrayList(tab.getCols()); - fullFieldList.addAll(tab.getPartCols()); + List fullFieldList = new ArrayList<>(tab.getAllCols()); cppCtx.getParseContext().getColumnAccessInfo() .add(tab.getCompleteName(), fullFieldList.get(index).getName()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java index 06912a1b3226..05f3b85f271f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java @@ -175,7 +175,8 @@ private void analyzeAcidExport(ASTNode ast, Table exportTable, ASTNode tokRefOrN //now generate insert statement //insert into newTableName select * from ts StringBuilder rewrittenQueryStr = generateExportQuery( - newTable.getPartCols(), tokRefOrNameExportTable, (ASTNode) 
tokRefOrNameExportTable.parent, newTableName); + newTable.getPartCols(), + tokRefOrNameExportTable, (ASTNode) tokRefOrNameExportTable.parent, newTableName); ReparseResult rr = ParseUtils.parseRewrittenQuery(ctx, rewrittenQueryStr); Context rewrittenCtx = rr.rewrittenCtx; rewrittenCtx.setIsUpdateDeleteMerge(false); //it's set in parseRewrittenQuery() diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java index 1b6f73ea264f..b751b56216e7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java @@ -83,7 +83,8 @@ public ColumnStatsAutoGatherContext(SemanticAnalyzer sa, HiveConf conf, this.isInsertInto = isInsertInto; this.origCtx = ctx; columns = tbl.getCols(); - partitionColumns = tbl.getPartCols(); + // current behaviour intact until we have getCols() giving only non-partition columns for non native tables as well + partitionColumns = tbl.hasNonNativePartitionSupport() ? new ArrayList<>() : tbl.getPartCols(); } public List getLoadFileWork() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java index 023934d9eb24..968e37f1b958 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java @@ -193,8 +193,7 @@ private static CharSequence genPartitionClause(Table tbl, List pa private static String getColTypeOf(Table tbl, String partKey) { - for (FieldSchema fs : tbl.hasNonNativePartitionSupport() ? 
- tbl.getStorageHandler().getPartitionKeys(tbl) : tbl.getPartitionKeys()) { + for (FieldSchema fs : tbl.getPartCols()) { if (partKey.equalsIgnoreCase(fs.getName())) { return fs.getType().toLowerCase(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index 4d4956fbec13..dcf197a2c201 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.ReplChangeManager; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; -import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.QueryState; @@ -89,7 +88,6 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.TreeMap; /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MergeSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MergeSemanticAnalyzer.java index 882840ffef5a..ac0a7e8f12be 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/MergeSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MergeSemanticAnalyzer.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.rewrite.MergeStatement; import org.apache.hadoop.hive.ql.parse.rewrite.RewriterFactory; -import org.apache.hadoop.hive.ql.plan.HiveOperation; import java.util.ArrayList; import java.util.HashMap; @@ -230,7 +229,7 @@ private MergeStatement.UpdateClause handleUpdate(ASTNode whenMatchedUpdateClause String deleteExtraPredicate) throws SemanticException { assert whenMatchedUpdateClause.getType() == HiveParser.TOK_MATCHED; assert 
getWhenClauseOperation(whenMatchedUpdateClause).getType() == HiveParser.TOK_UPDATE; - Map newValuesMap = new HashMap<>(targetTable.getCols().size() + targetTable.getPartCols().size()); + Map newValuesMap = new HashMap<>(targetTable.getAllCols().size()); ASTNode setClause = (ASTNode)getWhenClauseOperation(whenMatchedUpdateClause).getChild(0); //columns being updated -> update expressions; "setRCols" (last param) is null because we use actual expressions //before re-parsing, i.e. they are known to SemanticAnalyzer logic diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java index 9964b9369065..5fa22d9a2f82 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java @@ -581,8 +581,7 @@ public static Map> getFullPartitionSpecs( CommonTree ast, Table table, Configuration conf, boolean canGroupExprs) throws SemanticException { String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULT_PARTITION_NAME); Map colTypes = new HashMap<>(); - List partitionKeys = table.hasNonNativePartitionSupport() ? 
- table.getStorageHandler().getPartitionKeys(table) : table.getPartitionKeys(); + List partitionKeys = table.getPartCols(); for (FieldSchema fs : partitionKeys) { colTypes.put(fs.getName().toLowerCase(), fs.getType()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteMergeRewriter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteMergeRewriter.java index b72f2496d938..b7335473da85 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteMergeRewriter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteMergeRewriter.java @@ -202,7 +202,7 @@ public void appendWhenMatchedUpdateClause(MergeStatement.UpdateClause updateClau sqlGenerator.append(hintStr); hintStr = null; } - List values = new ArrayList<>(targetTable.getCols().size() + targetTable.getPartCols().size()); + List values = new ArrayList<>(targetTable.getAllCols().size()); values.addAll(sqlGenerator.getDeleteValues(Context.Operation.MERGE)); addValues(targetTable, targetAlias, updateClause.getNewValuesMap(), values); addValuesForRowLineageForCopyOnMerge(isRowLineageSupported, values, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/MergeRewriter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/MergeRewriter.java index 3ec2e580f046..c436e85a4eb6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/MergeRewriter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/MergeRewriter.java @@ -224,7 +224,7 @@ public void appendWhenMatchedUpdateClause(MergeStatement.UpdateClause updateClau sqlGenerator.append(" -- update clause").append("\n"); List valuesAndAcidSortKeys = new ArrayList<>( - targetTable.getCols().size() + targetTable.getPartCols().size() + 1); + targetTable.getAllCols().size() + 1); valuesAndAcidSortKeys.addAll(sqlGenerator.getSortKeys(Operation.MERGE)); addValues(targetTable, targetAlias, updateClause.getNewValuesMap(), valuesAndAcidSortKeys); 
sqlGenerator.appendInsertBranch(hintStr, valuesAndAcidSortKeys); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/SplitMergeRewriter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/SplitMergeRewriter.java index 84fcf186f6b7..06edaca90f0f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/SplitMergeRewriter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/SplitMergeRewriter.java @@ -58,7 +58,7 @@ public void appendWhenMatchedUpdateClause(MergeStatement.UpdateClause updateClau String onClauseAsString = mergeStatement.getOnClauseAsText(); sqlGenerator.append(" -- update clause (insert part)\n"); - List values = new ArrayList<>(targetTable.getCols().size() + targetTable.getPartCols().size()); + List values = new ArrayList<>(targetTable.getAllCols().size()); addValues(targetTable, targetAlias, updateClause.getNewValuesMap(), values); addRowLineageColumnsForWhenMatchedUpdateClause(isRowLineageSupported, values, targetAlias, conf); sqlGenerator.appendInsertBranch(hintStr, values); From 565a2eb69f4a0fefb6db2bcdb9848a76681253ab Mon Sep 17 00:00:00 2001 From: Ramit Gupta Date: Thu, 9 Apr 2026 15:33:03 +0530 Subject: [PATCH 2/5] HIVE-29413: Cache partition columns in Table and deduplicate partition columns in CalcitePlanner --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java | 2 +- .../java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index 7d0c00d61d45..3879d8d78040 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -255,7 +255,7 @@ public void setTTable(org.apache.hadoop.hive.metastore.api.Table tTable) { org.apache.hadoop.hive.metastore.api.Table t = new org.apache.hadoop.hive.metastore.api.Table(); { t.setSd(sd); - t.setPartitionKeys(null); + t.setPartitionKeys(new ArrayList()); t.setParameters(new HashMap()); 
t.setTableType(TableType.MANAGED_TABLE.toString()); t.setDbName(databaseName); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java index d5c683daa303..dfe473c8ef56 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java @@ -3048,6 +3048,7 @@ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticExc final NotNullConstraint nnc = tabMetaData.getNotNullConstraint(); final PrimaryKeyInfo pkc = tabMetaData.getPrimaryKeyInfo(); + Set alreadyAdded = new HashSet<>(); for (StructField structField : fields) { colName = structField.getFieldName(); colInfo = new ColumnInfo( @@ -3056,6 +3057,7 @@ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticExc isNullable(colName, nnc, pkc), tableAlias, false); colInfo.setSkewedCol(isSkewedCol(tableAlias, qb, colName)); rr.put(tableAlias, colName, colInfo); + alreadyAdded.add(colName); cInfoLst.add(colInfo); } // TODO: Fix this @@ -3065,6 +3067,9 @@ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticExc // 3.2 Add column info corresponding to partition columns for (FieldSchema part_col : tabMetaData.getPartCols()) { colName = part_col.getName(); + if (alreadyAdded.contains(colName)) { + continue; + } colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), isNullable(colName, nnc, pkc), tableAlias, true); From 9878ee1cd458ccbb7d7dced2bcbe36cf139f9775 Mon Sep 17 00:00:00 2001 From: Ramit Gupta Date: Fri, 10 Apr 2026 01:14:09 +0530 Subject: [PATCH 3/5] corrected bucket-map-join test --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java | 6 +++--- .../org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java | 7 +++++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index 3879d8d78040..89934346f047 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -632,9 +632,9 @@ private boolean isTableTypeSet() { } public FieldSchema getPartColByName(String colName) { - return getPartCols().stream() - .filter(key -> key.getName().toLowerCase().equals(colName)) - .findFirst().orElse(null); + return hasNonNativePartitionSupport() ? null : getPartCols().stream() + .filter(key -> key.getName().toLowerCase().equals(colName)) + .findFirst().orElse(null); } public List getPartColNames() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 5c4f049f0350..58840b878342 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -12001,6 +12001,8 @@ private Operator genTablePlan(String alias, QB qb) throws SemanticException { // Determine row schema for TSOP. // Include column names from SerDe, the partition and virtual columns. rwsch = new RowResolver(); + Set partCols = tab.hasNonNativePartitionSupport() ? + Sets.newHashSet(tab.getPartColNames()) : Collections.emptySet(); try { // Including parameters passed in the query if (properties != null) { @@ -12018,8 +12020,6 @@ private Operator genTablePlan(String alias, QB qb) throws SemanticException { deserializer.handleJobLevelConfiguration(conf); List fields = rowObjectInspector .getAllStructFieldRefs(); - Set partCols = tab.hasNonNativePartitionSupport() ? - Sets.newHashSet(tab.getPartColNames()) : Collections.emptySet(); for (int i = 0; i < fields.size(); i++) { /** * if the column is a skewed column, use ColumnInfo accordingly @@ -12039,6 +12039,9 @@ private Operator genTablePlan(String alias, QB qb) throws SemanticException { // Hack!! 
- refactor once the metadata APIs with types are ready // Finally add the partitioning columns for (FieldSchema part_col : tab.getPartCols()) { + if (partCols.contains(part_col.getName())) { + continue; + } LOG.trace("Adding partition col: " + part_col); rwsch.put(alias, part_col.getName(), new ColumnInfo(part_col.getName(), TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), alias, true)); From 8dc7809f77f758b5cccacb8f001b1edc2bca785d Mon Sep 17 00:00:00 2001 From: Ramit Gupta Date: Fri, 10 Apr 2026 11:34:14 +0530 Subject: [PATCH 4/5] corrected update statements --- .../hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java | 2 +- .../hadoop/hive/ql/parse/rewrite/MergeRewriter.java | 6 ++++-- .../hadoop/hive/ql/parse/rewrite/SplitUpdateRewriter.java | 2 +- .../ql/parse/rewrite/sql/MultiInsertSqlGenerator.java | 8 +++++++- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java index 101f6b1fc3d8..8e52f63c6611 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java @@ -120,7 +120,7 @@ protected void checkValidSetClauseTarget(ASTNode colName, Table targetTable) thr // Make sure this isn't one of the partitioning columns, that's not supported. 
for (FieldSchema fschema : targetTable.getPartCols()) { - if (fschema.getName().equalsIgnoreCase(columnName)) { + if (fschema.getName().equalsIgnoreCase(columnName) && !targetTable.hasNonNativePartitionSupport()) { throw new SemanticException(ErrorMsg.UPDATE_CANNOT_UPDATE_PART_VALUE.getMsg()); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/MergeRewriter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/MergeRewriter.java index c436e85a4eb6..c9ba50d110f8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/MergeRewriter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/MergeRewriter.java @@ -250,8 +250,10 @@ protected void addValues(Table targetTable, String targetAlias, Map values.add( - formatter.apply(fieldSchema.getName()))); + if (!targetTable.hasNonNativePartitionSupport()) { + targetTable.getPartCols().forEach(fieldSchema -> values.add( + formatter.apply(fieldSchema.getName()))); + } } protected String getRhsExpValue(String newValue, String alias) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/SplitUpdateRewriter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/SplitUpdateRewriter.java index d14ddc7eb485..838d1d8a09a6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/SplitUpdateRewriter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/SplitUpdateRewriter.java @@ -98,7 +98,7 @@ public ParseUtils.ReparseResult rewrite(Context context, UpdateStatement updateB insertValues.add(sqlGenerator.qualify(identifier)); } - if (updateBlock.getTargetTable().getPartCols() != null) { + if (!updateBlock.getTargetTable().hasNonNativePartitionSupport()) { updateBlock.getTargetTable().getPartCols().forEach( fieldSchema -> insertValues.add(sqlGenerator.qualify(HiveUtils.unparseIdentifier(fieldSchema.getName(), conf)))); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/sql/MultiInsertSqlGenerator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/sql/MultiInsertSqlGenerator.java index 7587daf13055..6576dd28d2ec 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/sql/MultiInsertSqlGenerator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/sql/MultiInsertSqlGenerator.java @@ -111,6 +111,9 @@ public void appendPartitionColsOfTarget() { */ public void appendPartitionCols(Table table) { // If the table is partitioned we have to put the partition() clause in + if (table.hasNonNativePartitionSupport()) { + return; + } List partCols = table.getPartCols(); if (partCols == null || partCols.isEmpty()) { return; @@ -148,7 +151,10 @@ public void removeLastChar() { } public void appendPartColsOfTargetTableWithComma(String alias) { - if (targetTable.getPartCols() == null || targetTable.getPartCols().isEmpty()) { + if (targetTable.hasNonNativePartitionSupport()) { + return; + } + if (targetTable.getPartCols().isEmpty()) { return; } queryStr.append(','); From 831b9b2091a6843ea06c945b7b9ce4ba07de3b39 Mon Sep 17 00:00:00 2001 From: Ramit Gupta Date: Sat, 11 Apr 2026 20:13:07 +0530 Subject: [PATCH 5/5] corrected load, partition evolution tests --- .../org/apache/hadoop/hive/ql/ddl/table/AlterTableUtils.java | 3 +++ .../java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java | 4 ++-- .../apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java | 5 +++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableUtils.java index 17a964a44583..ae7696c3e536 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableUtils.java @@ -75,6 +75,9 @@ public static boolean isSchemaEvolutionEnabled(Table table, Configuration conf) } public static boolean isFullPartitionSpec(Table table, Map partitionSpec) { + if 
(table.hasNonNativePartitionSupport()) { + return true; + } for (FieldSchema partitionCol : table.getPartCols()) { if (partitionSpec.get(partitionCol.getName()) == null) { return false; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java index dfe473c8ef56..eac67c75618b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java @@ -3067,8 +3067,8 @@ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticExc // 3.2 Add column info corresponding to partition columns for (FieldSchema part_col : tabMetaData.getPartCols()) { colName = part_col.getName(); - if (alreadyAdded.contains(colName)) { - continue; + if (tabMetaData.hasNonNativePartitionSupport()) { + break; } colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index eb4a73f1e5e9..6aa5b08fd5f4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -23,6 +23,7 @@ import java.net.URISyntaxException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; @@ -511,7 +512,7 @@ private void reparseAndSuperAnalyze(Table table, URI fromURI) throws SemanticExc // Partition spec was already validated by caller when create TableSpec object. // So, need not validate inpPartSpec here. - List parts = table.getPartCols(); + List parts = table.hasNonNativePartitionSupport() ? 
Collections.emptyList() : table.getPartCols(); if (tableTree.getChildCount() >= 2) { ASTNode partSpecNode = (ASTNode) tableTree.getChild(1); inpPartSpec = new HashMap<>(partSpecNode.getChildCount()); @@ -561,7 +562,7 @@ private void reparseAndSuperAnalyze(Table table, URI fromURI) throws SemanticExc } rewrittenQueryStr.append(getFullTableNameForSQL((ASTNode)(tableTree.getChild(0)))); - addPartitionColsToInsert(table.getPartCols(), inpPartSpec, rewrittenQueryStr); + addPartitionColsToInsert(parts, inpPartSpec, rewrittenQueryStr); rewrittenQueryStr.append(" select * from "); rewrittenQueryStr.append(tempTblName);