-
Notifications
You must be signed in to change notification settings - Fork 29.1k
[SPARK-55092][SQL] Disable partition grouping in KeyGroupedPartitioning when not needed
#53859
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from 5 commits
c63f709
7f79f9e
b5ab00a
de7b287
4fd8026
b04bb61
c28fc3f
dd62e72
2a882ec
77b78d9
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -36,6 +36,7 @@ import org.apache.spark.sql.catalyst.analysis.{LazyExpression, NameParameterized | |
| import org.apache.spark.sql.catalyst.expressions.codegen.ByteCodeStats | ||
| import org.apache.spark.sql.catalyst.plans.QueryPlan | ||
| import org.apache.spark.sql.catalyst.plans.logical.{AppendData, Command, CommandResult, CompoundBody, CreateTableAsSelect, LogicalPlan, OverwriteByExpression, OverwritePartitionsDynamic, ReplaceTableAsSelect, ReturnAnswer, Union, WithCTE} | ||
| import org.apache.spark.sql.catalyst.plans.physical.UnspecifiedDistribution | ||
| import org.apache.spark.sql.catalyst.rules.{PlanChangeLogger, Rule} | ||
| import org.apache.spark.sql.catalyst.util.StringUtils.PlanStringConcat | ||
| import org.apache.spark.sql.catalyst.util.truncatedString | ||
|
|
@@ -622,6 +623,11 @@ object QueryExecution { | |
| sparkSession: SparkSession, | ||
| adaptiveExecutionRule: Option[InsertAdaptiveSparkPlan] = None, | ||
| subquery: Boolean): Seq[Rule[SparkPlan]] = { | ||
| val requiredDistribution = if (subquery) { | ||
|
||
| Some(UnspecifiedDistribution) | ||
| } else { | ||
| None | ||
| } | ||
| // `AdaptiveSparkPlanExec` is a leaf node. If inserted, all the following rules will be no-op | ||
| // as the original plan is hidden behind `AdaptiveSparkPlanExec`. | ||
| adaptiveExecutionRule.toSeq ++ | ||
|
|
@@ -630,7 +636,7 @@ object QueryExecution { | |
| PlanDynamicPruningFilters(sparkSession), | ||
| PlanSubqueries(sparkSession), | ||
| RemoveRedundantProjects, | ||
| EnsureRequirements(), | ||
| EnsureRequirements(requiredDistribution = requiredDistribution), | ||
| // This rule must be run after `EnsureRequirements`. | ||
| InsertSortForLimitAndOffset, | ||
| // `ReplaceHashWithSortAgg` needs to be added after `EnsureRequirements` to guarantee the | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -66,19 +66,22 @@ case class EnsureRequirements( | |
| case (child, distribution) if child.outputPartitioning.satisfies(distribution) => | ||
| ensureOrdering(child, distribution) | ||
| case (child, BroadcastDistribution(mode)) => | ||
| BroadcastExchangeExec(mode, child) | ||
| val newChild = disableKeyGroupingIfNotNeeded(child) | ||
| BroadcastExchangeExec(mode, newChild) | ||
| case (child, distribution) => | ||
| val numPartitions = distribution.requiredNumPartitions | ||
| .getOrElse(conf.numShufflePartitions) | ||
| distribution match { | ||
| case _: StatefulOpClusteredDistribution => | ||
| val newChild = disableKeyGroupingIfNotNeeded(child) | ||
| ShuffleExchangeExec( | ||
| distribution.createPartitioning(numPartitions), child, | ||
| distribution.createPartitioning(numPartitions), newChild, | ||
| REQUIRED_BY_STATEFUL_OPERATOR) | ||
|
|
||
| case _ => | ||
| val newChild = disableKeyGroupingIfNotNeeded(child) | ||
| ShuffleExchangeExec( | ||
| distribution.createPartitioning(numPartitions), child, shuffleOrigin) | ||
| distribution.createPartitioning(numPartitions), newChild, shuffleOrigin) | ||
| } | ||
| } | ||
|
|
||
|
|
@@ -224,8 +227,11 @@ case class EnsureRequirements( | |
|
|
||
| child match { | ||
| case ShuffleExchangeExec(_, c, so, ps) => | ||
| ShuffleExchangeExec(newPartitioning, c, so, ps) | ||
| case _ => ShuffleExchangeExec(newPartitioning, child) | ||
| val newChild = disableKeyGroupingIfNotNeeded(c) | ||
| ShuffleExchangeExec(newPartitioning, newChild, so, ps) | ||
| case _ => | ||
| val newChild = disableKeyGroupingIfNotNeeded(child) | ||
|
||
| ShuffleExchangeExec(newPartitioning, newChild) | ||
| } | ||
| } | ||
| } | ||
|
|
@@ -695,6 +701,21 @@ case class EnsureRequirements( | |
| child, values, joinKeyPositions, reducers, applyPartialClustering, replicatePartitions)) | ||
| } | ||
|
|
||
| private def disableKeyGroupingIfNotNeeded(child: SparkPlan) = { | ||
|
||
| if (canApplyPartialClusteredDistribution(child)) { | ||
| populateNoGroupingPartitionInfo(child) | ||
| } else { | ||
| child | ||
| } | ||
| } | ||
|
|
||
| private def populateNoGroupingPartitionInfo(plan: SparkPlan): SparkPlan = plan match { | ||
|
||
| case scan: BatchScanExec => | ||
| val newScan = scan.copy(spjParams = scan.spjParams.copy(noGrouping = true)) | ||
| newScan.copyTagsFrom(scan) | ||
| newScan | ||
| case node => node.mapChildren(child => populateNoGroupingPartitionInfo(child)) | ||
| } | ||
|
|
||
| private def populateJoinKeyPositions( | ||
| plan: SparkPlan, | ||
|
|
@@ -843,9 +864,14 @@ case class EnsureRequirements( | |
| } else { | ||
| REPARTITION_BY_COL | ||
| } | ||
| val groupingDisabledPlan = if (requiredDistribution.get == UnspecifiedDistribution) { | ||
| disableKeyGroupingIfNotNeeded(newPlan) | ||
| } else { | ||
| newPlan | ||
| } | ||
| val finalPlan = ensureDistributionAndOrdering( | ||
| None, | ||
| newPlan :: Nil, | ||
| groupingDisabledPlan :: Nil, | ||
| requiredDistribution.get :: Nil, | ||
| Seq(Nil), | ||
| shuffleOrigin) | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
not sure i get this, if its not a subquery we pass in any requiredDistribution?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yeah, let me change this tomorrow and pass in
`subquery` directly into `EnsureRequirements`, that way this will be much cleaner.
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Fixed in b04bb61 and added comments in c28fc3f.