-
Notifications
You must be signed in to change notification settings - Fork 25.6k
Configurable limit on concurrent shard closing #121267
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 3 commits
4a93d27
1ac59d6
934f2b3
9605f46
d1fd519
e8d2f5a
c7476bf
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -116,6 +116,14 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple | |
| Setting.Property.NodeScope | ||
| ); | ||
|
|
||
| public static final Setting<Integer> CONCURRENT_SHARD_CLOSE_LIMIT = Setting.intSetting( | ||
| "indices.store.max_concurrent_closing_shards", | ||
| settings -> Integer.toString(Math.min(10, EsExecutors.NODE_PROCESSORS_SETTING.get(settings).roundUp())), | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Previously the default max was `Math.max(numProcessors, 10)` (i.e. at least 10), but the new default is `Math.min(10, numProcessors)` (i.e. at most 10). Note
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Yes, it was a (my) mistake to use `Math.max` there originally; the intended default is to cap the limit at 10 via `Math.min`.
||
| 1, | ||
| Integer.MAX_VALUE, | ||
| Setting.Property.NodeScope | ||
| ); | ||
|
|
||
| final AllocatedIndices<? extends Shard, ? extends AllocatedIndex<? extends Shard>> indicesService; | ||
| private final ClusterService clusterService; | ||
| private final ThreadPool threadPool; | ||
|
|
@@ -1347,7 +1355,7 @@ enum IndexRemovalReason { | |
| } | ||
| } | ||
|
|
||
| private static class ShardCloseExecutor implements Executor { | ||
| static class ShardCloseExecutor implements Executor { | ||
|
|
||
| private final ThrottledTaskRunner throttledTaskRunner; | ||
|
|
||
|
|
@@ -1360,8 +1368,11 @@ private static class ShardCloseExecutor implements Executor { | |
| // can't close the old ones down fast enough. Maybe we could block or throttle new shards starting while old shards are still | ||
| // shutting down, given that starting new shards is already async. Since this seems unlikely in practice, we opt for the simple | ||
| // approach here. | ||
| final var maxThreads = Math.max(EsExecutors.NODE_PROCESSORS_SETTING.get(settings).roundUp(), 10); | ||
| throttledTaskRunner = new ThrottledTaskRunner(IndicesClusterStateService.class.getCanonicalName(), maxThreads, delegate); | ||
| throttledTaskRunner = new ThrottledTaskRunner( | ||
| IndicesClusterStateService.class.getCanonicalName(), | ||
| CONCURRENT_SHARD_CLOSE_LIMIT.get(settings), | ||
| delegate | ||
| ); | ||
| } | ||
|
|
||
| @Override | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,62 @@ | ||
| /* | ||
| * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one | ||
| * or more contributor license agreements. Licensed under the "Elastic License | ||
| * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side | ||
| * Public License v 1"; you may not use this file except in compliance with, at | ||
| * your election, the "Elastic License 2.0", the "GNU Affero General Public | ||
| * License v3.0 only", or the "Server Side Public License, v 1". | ||
| */ | ||
|
|
||
| package org.elasticsearch.indices.cluster; | ||
|
|
||
| import org.elasticsearch.common.settings.Settings; | ||
| import org.elasticsearch.common.util.concurrent.EsExecutors; | ||
| import org.elasticsearch.test.ESTestCase; | ||
|
|
||
| import java.util.ArrayList; | ||
| import java.util.concurrent.atomic.AtomicInteger; | ||
|
|
||
| public class ShardCloseExecutorTests extends ESTestCase { | ||
|
|
||
| public void testThrottling() { | ||
| final var defaultProcessors = EsExecutors.NODE_PROCESSORS_SETTING.get(Settings.EMPTY).roundUp(); | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. What are the expectations around the value of
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It is the number of CPUs of the machine on which the tests are running, so can be more or less than 10. And it's not permitted to increase |
||
| ensureThrottling(Math.min(10, defaultProcessors), Settings.EMPTY); | ||
|
|
||
| if (10 < defaultProcessors) { | ||
| ensureThrottling( | ||
| 10, | ||
| Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), between(10, defaultProcessors - 1)).build() | ||
| ); | ||
| } | ||
|
|
||
| if (1 < defaultProcessors) { | ||
| final var fewProcessors = between(1, Math.min(10, defaultProcessors - 1)); | ||
| ensureThrottling(fewProcessors, Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), fewProcessors).build()); | ||
| } | ||
|
|
||
| final var override = between(1, defaultProcessors * 2); | ||
| ensureThrottling( | ||
| override, | ||
| Settings.builder().put(IndicesClusterStateService.CONCURRENT_SHARD_CLOSE_LIMIT.getKey(), override).build() | ||
| ); | ||
| } | ||
|
|
||
| private static void ensureThrottling(int expectedLimit, Settings settings) { | ||
| final var tasksToRun = new ArrayList<Runnable>(expectedLimit + 1); | ||
| final var executor = new IndicesClusterStateService.ShardCloseExecutor(settings, tasksToRun::add); | ||
| final var runCount = new AtomicInteger(); | ||
|
|
||
| for (int i = 0; i < expectedLimit + 1; i++) { | ||
| executor.execute(runCount::incrementAndGet); | ||
| } | ||
|
|
||
| assertEquals(expectedLimit, tasksToRun.size()); // didn't enqueue the final task yet | ||
|
|
||
| for (int i = 0; i < tasksToRun.size(); i++) { | ||
|
||
| assertEquals(i, runCount.get()); | ||
| tasksToRun.get(i).run(); | ||
| assertEquals(i + 1, runCount.get()); | ||
| assertEquals(expectedLimit + 1, tasksToRun.size()); | ||
| } | ||
| } | ||
| } | ||
Uh oh!
There was an error while loading. Please reload this page.