[FLINK-36192][autoscaler] Optimize the logic when partitions or key groups cannot be evenly distributed to subtasks #879
Changes from 9 commits
@@ -50,6 +50,7 @@
 import static org.apache.flink.autoscaler.config.AutoScalerOptions.VERTEX_MIN_PARALLELISM;
 import static org.apache.flink.autoscaler.metrics.ScalingMetric.EXPECTED_PROCESSING_RATE;
 import static org.apache.flink.autoscaler.metrics.ScalingMetric.MAX_PARALLELISM;
+import static org.apache.flink.autoscaler.metrics.ScalingMetric.NUM_SOURCE_PARTITIONS;
 import static org.apache.flink.autoscaler.metrics.ScalingMetric.PARALLELISM;
 import static org.apache.flink.autoscaler.metrics.ScalingMetric.TRUE_PROCESSING_RATE;
 import static org.apache.flink.autoscaler.topology.ShipStrategy.HASH;

@@ -66,6 +67,14 @@ public class JobVertexScaler<KEY, Context extends JobAutoScalerContext<KEY>> {
     protected static final String INEFFECTIVE_MESSAGE_FORMAT =
             "Ineffective scaling detected for %s (expected increase: %s, actual increase %s). Blocking of ineffective scaling decisions is %s";

+    @VisibleForTesting protected static final String SCALING_LIMITED = "ScalingLimited";
+
+    @VisibleForTesting
+    protected static final String SCALE_LIMITED_MESSAGE_FORMAT =
+            "Scaling limited detected for %s (expected parallelism: %s, actual parallelism %s). "
+                    + "Scaling limited due to source partitions : %s,"
+                    + "upperBoundForAlignment(maxParallelism or parallelismUpperLimit): %s, parallelismLowerLimit: %s.";
+
     private Clock clock = Clock.system(ZoneId.systemDefault());

     private final AutoScalerEventHandler<KEY, Context> autoScalerEventHandler;
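For orientation while reading the rest of the diff, this is roughly what the new event text looks like once the format is filled in; the vertex id and every number below are invented for illustration and are not taken from this PR:

    // Hypothetical values, for illustration only.
    String message =
            String.format(
                    SCALE_LIMITED_MESSAGE_FORMAT,
                    "<vertex-id>",  // vertex
                    64,             // expected (computed) parallelism
                    50,             // actual parallelism after alignment
                    100,            // number of source partitions
                    80,             // upperBoundForAlignment
                    1);             // parallelismLowerLimit

The rendered message tells the user which bound prevented an evenly divisible parallelism from being chosen.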

@@ -193,12 +202,16 @@ public ParallelismChange computeScaleTargetParallelism(
         int newParallelism =
                 scale(
                         vertex,
                         currentParallelism,
                         inputShipStrategies,
+                        (int) evaluatedMetrics.get(NUM_SOURCE_PARTITIONS).getCurrent(),
                         (int) evaluatedMetrics.get(MAX_PARALLELISM).getCurrent(),
                         scaleFactor,
                         Math.min(currentParallelism, conf.getInteger(VERTEX_MIN_PARALLELISM)),
-                        Math.max(currentParallelism, conf.getInteger(VERTEX_MAX_PARALLELISM)));
+                        Math.max(currentParallelism, conf.getInteger(VERTEX_MAX_PARALLELISM)),
+                        autoScalerEventHandler,
+                        context);

         if (newParallelism == currentParallelism) {
             // Clear delayed scale down request if the new parallelism is equal to

@@ -345,15 +358,22 @@ private boolean detectIneffectiveScaleUp(
      * <p>Also, in order to ensure the data is evenly spread across subtasks, we try to adjust the
      * parallelism for source and keyed vertex such that it divides the maxParallelism without a
      * remainder.
+     *
+     * <p>This method also attempts to adjust the parallelism to ensure it aligns well with the
+     * number of source partitions if a vertex has a known source partition count.
      */
     @VisibleForTesting
-    protected static int scale(
+    protected static <KEY, Context extends JobAutoScalerContext<KEY>> int scale(
             JobVertexID vertex,
             int currentParallelism,
             Collection<ShipStrategy> inputShipStrategies,
+            int numSourcePartitions,
             int maxParallelism,
             double scaleFactor,
             int parallelismLowerLimit,
-            int parallelismUpperLimit) {
+            int parallelismUpperLimit,
+            AutoScalerEventHandler<KEY, Context> eventHandler,
+            Context context) {
         checkArgument(
                 parallelismLowerLimit <= parallelismUpperLimit,
                 "The parallelism lower limitation must not be greater than the parallelism upper limitation.");
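As a concrete illustration of the divisor alignment described in this Javadoc (numbers invented for this explanation, not taken from the PR): a keyed vertex with 128 key groups and a computed parallelism of 10 is rounded up to 16, the next divisor of 128, so every subtask handles exactly 128 / 16 = 8 key groups; a source with 24 known partitions and a computed parallelism of 7 is rounded up to 8, so every subtask reads exactly 24 / 8 = 3 partitions.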

@@ -383,23 +403,62 @@ protected static int scale(
         // Apply min/max parallelism
         newParallelism = Math.min(Math.max(parallelismLowerLimit, newParallelism), upperBound);

-        var adjustByMaxParallelism =
-                inputShipStrategies.isEmpty() || inputShipStrategies.contains(HASH);
-        if (!adjustByMaxParallelism) {
+        var adjustByMaxParallelismOrPartitions =
+                numSourcePartitions > 0 || inputShipStrategies.contains(HASH);
+        if (!adjustByMaxParallelismOrPartitions) {
             return newParallelism;
         }

-        // When the shuffle type of vertex inputs contains keyBy or vertex is a source, we try to
-        // adjust the parallelism such that it divides the maxParallelism without a remainder
-        // => data is evenly spread across subtasks
-        for (int p = newParallelism; p <= maxParallelism / 2 && p <= upperBound; p++) {
-            if (maxParallelism % p == 0) {
+        var numKeyGroupsOrPartitions =
+                numSourcePartitions <= 0 ? maxParallelism : numSourcePartitions;
+        var upperBoundForAlignment =
+                Math.min(
+                        // Optimize the case where newParallelism <= maxParallelism / 2
+                        newParallelism > numKeyGroupsOrPartitions / 2
+                                ? numKeyGroupsOrPartitions
+                                : numKeyGroupsOrPartitions / 2,
+                        upperBound);
+
+        // When the shuffle type of vertex inputs contains keyBy or vertex is a source,
+        // we try to adjust the parallelism such that it divides the numKeyGroupsOrPartitions
+        // without a remainder => data is evenly spread across subtasks
+        for (int p = newParallelism; p <= upperBoundForAlignment; p++) {
+            if (numKeyGroupsOrPartitions % p == 0) {
Review discussion on this line:

About this comment #879 (comment), I'm thinking whether the following change is more reasonable? Note:
Suggested change
For example: maxParallelism is 200, and the new parallelism is 60. (Some subtasks consume 4 keyGroups, the rest of the subtasks consume 3 keyGroups.) Also, it's a bit beyond the scope of this PR. I could file a separate PR if you think it makes sense. Of course, it's acceptable to do it in this PR.

I think that makes sense, but it makes the scaling more aggressive and less balanced. If we want to be more conservative, maybe 100 is ok in this scenario, where there is actually a divisor without a remainder. When there isn't, I think what you propose is way better than just using the initially provided parallelism. In summary, I'm proposing a two-step process, similar to the one for the partitions, where we first try to find a parallelism that divides the key groups without a remainder, and if that fails we do what you propose.

Thanks for the review, I agree with this. We can introduce an additional parameter to enable a more aggressive strategy: #879 (comment). But by default, a divisor of the number of partitions is still used to ensure balanced consumption.

Sounds reasonable to me, we could introduce an option for this. I want to check with @huyuanfeng2018 and @mxm: the strategy will work for both source partitions and key groups, right? As I understand it, we could unify the strategy for these 2 cases.

I think we can unify the strategies.

Sounds good!
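To spell out the numbers used in this thread: with maxParallelism = 200 and a computed parallelism of 60, the busiest subtasks handle ceil(200 / 60) = 4 key groups while the rest handle 3. The conservative first step searches for a divisor of 200 between 60 and min(100, upperBound) and picks 100 when the bounds allow it, giving every subtask exactly 2 key groups. When 100 is out of reach, the fallback discussed here picks 50, the smallest parallelism at which no subtask handles more than 4 key groups, and since 200 / 50 = 4 the load is perfectly even again.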
                 return p;
             }
         }

-        // If parallelism adjustment fails, use originally computed parallelism
-        return newParallelism;
+        // When the parallelism after rounding up cannot evenly divide numKeyGroupsOrPartitions,
+        // try to find the smallest parallelism that can satisfy the current consumption rate.
+        int p = newParallelism;
+        for (; p > 0; p--) {
+            if (numKeyGroupsOrPartitions / p > numKeyGroupsOrPartitions / newParallelism) {
+                if (numKeyGroupsOrPartitions % p != 0) {
+                    p++;
+                }
+                break;
+            }
+        }
+
+        p = Math.max(p, parallelismLowerLimit);
+        var message =
+                String.format(
+                        SCALE_LIMITED_MESSAGE_FORMAT,
+                        vertex,
+                        newParallelism,
+                        p,
+                        numSourcePartitions,
+                        upperBound,
+                        parallelismLowerLimit);
+        eventHandler.handleEvent(
+                context,
+                AutoScalerEventHandler.Type.Warning,
+                SCALING_LIMITED,
+                message,
+                SCALING_LIMITED + vertex + (scaleFactor * currentParallelism),
+                context.getConfiguration().get(SCALING_EVENT_INTERVAL));
+        return p;
     }

     @VisibleForTesting
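Read end to end, the new alignment logic in this hunk can be summarized by the following self-contained sketch. It is a simplified restatement for illustration only (no event reporting, shortened names and signature), not the operator's actual method:

    public class AlignmentSketch {

        // Mirrors the two-step adjustment from the diff above.
        static int alignParallelism(
                int newParallelism,       // parallelism computed from the scale factor, already clamped
                int numSourcePartitions,  // > 0 only for sources with a known partition count
                int maxParallelism,       // number of key groups for keyed vertices
                int upperBound,           // min(maxParallelism, parallelismUpperLimit)
                int parallelismLowerLimit) {
            int numKeyGroupsOrPartitions =
                    numSourcePartitions <= 0 ? maxParallelism : numSourcePartitions;
            int upperBoundForAlignment =
                    Math.min(
                            newParallelism > numKeyGroupsOrPartitions / 2
                                    ? numKeyGroupsOrPartitions
                                    : numKeyGroupsOrPartitions / 2,
                            upperBound);

            // Step 1: prefer a parallelism that divides the key group / partition count evenly.
            for (int p = newParallelism; p <= upperBoundForAlignment; p++) {
                if (numKeyGroupsOrPartitions % p == 0) {
                    return p;
                }
            }

            // Step 2: otherwise fall back to the smallest parallelism that still satisfies the
            // current consumption rate, i.e. whose busiest subtask is no busier than at
            // newParallelism.
            int p = newParallelism;
            for (; p > 0; p--) {
                if (numKeyGroupsOrPartitions / p > numKeyGroupsOrPartitions / newParallelism) {
                    if (numKeyGroupsOrPartitions % p != 0) {
                        p++;
                    }
                    break;
                }
            }
            return Math.max(p, parallelismLowerLimit);
        }

        public static void main(String[] args) {
            // 100 source partitions, desired parallelism 64, parallelism capped at 80:
            // no divisor of 100 exists in [64, 80], so the fallback settles on 50 and every
            // subtask consumes exactly 2 partitions.
            System.out.println(alignParallelism(64, 100, 128, 80, 1)); // prints 50
        }
    }

With these inputs, raising the parallelism anywhere in 64..80 would not reduce the two-partitions-per-subtask ceiling, so 50 subtasks deliver the same effective rate with fewer resources, which is the kind of situation this PR targets.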