-
Notifications
You must be signed in to change notification settings.
Fork 25.6k
Account for simulated utilization threshold in WriteLoadConstraintDeciderIT #133894
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 2 commits
d9b45d4
f506534
ba15f8f
c995fb7
946469e
dc48fbb
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -68,6 +68,8 @@ protected Collection<Class<? extends Plugin>> getMockPlugins() { | |
| */ | ||
| public void testHighNodeWriteLoadPreventsNewShardAllocation() { | ||
| int randomUtilizationThresholdPercent = randomIntBetween(50, 100); | ||
| int numberOfWritePoolThreads = randomIntBetween(10, 20); | ||
| double shardWriteLoad = randomDoubleBetween(0.0, 0.2, true); | ||
|
||
| Settings settings = Settings.builder() | ||
| .put( | ||
| WriteLoadConstraintSettings.WRITE_LOAD_DECIDER_ENABLED_SETTING.getKey(), | ||
|
|
@@ -115,7 +117,11 @@ public void testHighNodeWriteLoadPreventsNewShardAllocation() { | |
| ); | ||
|
|
||
| String indexName = randomIdentifier(); | ||
| int randomNumberOfShards = randomIntBetween(15, 40); // Pick a high number of shards, so it is clear assignment is not accidental. | ||
| int randomNumberOfShards = randomIntBetween(10, 20); // Pick a high number of shards, so it is clear assignment is not accidental. | ||
|
|
||
| // Calculate the maximum utilization a node can report while still being able to accept all relocating shards | ||
| double additionalLoadFromAllShards = (shardWriteLoad * randomNumberOfShards) / numberOfWritePoolThreads; | ||
|
||
| int maxUtilizationPercent = randomUtilizationThresholdPercent - (int) (additionalLoadFromAllShards * 100) - 1; | ||
|
|
||
| var verifyAssignmentToFirstNodeListener = ClusterServiceUtils.addMasterTemporaryStateListener(clusterState -> { | ||
| var indexRoutingTable = clusterState.routingTable().index(indexName); | ||
|
|
@@ -154,19 +160,19 @@ public void testHighNodeWriteLoadPreventsNewShardAllocation() { | |
| final DiscoveryNode thirdDiscoveryNode = getDiscoveryNode(thirdDataNodeName); | ||
| final NodeUsageStatsForThreadPools firstNodeNonHotSpottingNodeStats = createNodeUsageStatsForThreadPools( | ||
| firstDiscoveryNode, | ||
| 2, | ||
| 0.5f, | ||
| numberOfWritePoolThreads, | ||
| randomIntBetween(0, maxUtilizationPercent) / 100f, | ||
| 0 | ||
| ); | ||
| final NodeUsageStatsForThreadPools secondNodeNonHotSpottingNodeStats = createNodeUsageStatsForThreadPools( | ||
| secondDiscoveryNode, | ||
| 2, | ||
| 0.5f, | ||
| numberOfWritePoolThreads, | ||
| randomIntBetween(0, maxUtilizationPercent) / 100f, | ||
| 0 | ||
| ); | ||
| final NodeUsageStatsForThreadPools thirdNodeHotSpottingNodeStats = createNodeUsageStatsForThreadPools( | ||
| thirdDiscoveryNode, | ||
| 2, | ||
| numberOfWritePoolThreads, | ||
| randomUtilizationThresholdPercent + 1 / 100, | ||
|
||
| 0 | ||
| ); | ||
|
|
@@ -197,12 +203,11 @@ public void testHighNodeWriteLoadPreventsNewShardAllocation() { | |
| .getMetadata() | ||
| .getProject() | ||
| .index(indexName); | ||
| double shardWriteLoadDefault = 0.2; | ||
| MockTransportService.getInstance(firstDataNodeName) | ||
| .addRequestHandlingBehavior(IndicesStatsAction.NAME + "[n]", (handler, request, channel, task) -> { | ||
| List<ShardStats> shardStats = new ArrayList<>(indexMetadata.getNumberOfShards()); | ||
| for (int i = 0; i < indexMetadata.getNumberOfShards(); i++) { | ||
| shardStats.add(createShardStats(indexMetadata, i, shardWriteLoadDefault, firstDataNodeId)); | ||
| shardStats.add(createShardStats(indexMetadata, i, shardWriteLoad, firstDataNodeId)); | ||
| } | ||
| TransportIndicesStatsAction instance = internalCluster().getInstance(TransportIndicesStatsAction.class, firstDataNodeName); | ||
| channel.sendResponse(instance.new NodeResponse(firstDataNodeId, indexMetadata.getNumberOfShards(), shardStats, List.of())); | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think the lower bound of the randomization should be 2 since that's a realistic setup.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Lowering the thread count might result in the shards being unable to fit on a single node, I expect. Not sure how much we care about realism. We could lower the shardWriteLoad to 0.001, to remove that concern -- and give the threshold randomization a bigger range of values.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Fixed in ba15f8f