|
35 | 35 | import org.elasticsearch.common.settings.Settings; |
36 | 36 | import org.elasticsearch.common.util.concurrent.EsExecutors; |
37 | 37 | import org.elasticsearch.common.util.concurrent.ThreadContext; |
| 38 | +import org.elasticsearch.core.SuppressForbidden; |
38 | 39 | import org.elasticsearch.core.TimeValue; |
39 | 40 | import org.elasticsearch.index.IndexService; |
40 | 41 | import org.elasticsearch.index.IndexSettings; |
|
78 | 79 | import org.elasticsearch.xcontent.XContentBuilder; |
79 | 80 |
|
80 | 81 | import java.io.IOException; |
| 82 | +import java.io.UncheckedIOException; |
| 83 | +import java.util.ArrayList; |
| 84 | +import java.util.Collection; |
| 85 | +import java.util.List; |
81 | 86 | import java.util.UUID; |
| 87 | +import java.util.concurrent.Callable; |
| 88 | +import java.util.concurrent.ExecutionException; |
| 89 | +import java.util.concurrent.Executor; |
82 | 90 | import java.util.concurrent.ExecutorService; |
83 | 91 | import java.util.concurrent.Executors; |
| 92 | +import java.util.concurrent.Future; |
| 93 | +import java.util.concurrent.FutureTask; |
| 94 | +import java.util.concurrent.LinkedBlockingQueue; |
| 95 | +import java.util.concurrent.RunnableFuture; |
84 | 96 | import java.util.concurrent.ThreadPoolExecutor; |
| 97 | +import java.util.concurrent.TimeUnit; |
| 98 | +import java.util.concurrent.atomic.AtomicBoolean; |
| 99 | +import java.util.concurrent.atomic.AtomicInteger; |
85 | 100 | import java.util.function.Function; |
86 | 101 | import java.util.function.Supplier; |
87 | 102 | import java.util.function.ToLongFunction; |
88 | 103 |
|
89 | 104 | import static org.hamcrest.Matchers.equalTo; |
| 105 | +import static org.hamcrest.Matchers.greaterThanOrEqualTo; |
90 | 106 | import static org.hamcrest.Matchers.instanceOf; |
91 | 107 | import static org.hamcrest.Matchers.is; |
| 108 | +import static org.hamcrest.Matchers.lessThan; |
92 | 109 | import static org.mockito.ArgumentMatchers.any; |
93 | 110 | import static org.mockito.ArgumentMatchers.anyString; |
94 | 111 | import static org.mockito.ArgumentMatchers.eq; |
@@ -959,11 +976,161 @@ public void testGetFieldCardinalityRuntimeField() { |
959 | 976 | assertEquals(-1, DefaultSearchContext.getFieldCardinality("field", indexService, null)); |
960 | 977 | } |
961 | 978 |
|
| 979 | + public void testSingleThreadNoSearchConcurrency() throws IOException, ExecutionException, InterruptedException { |
| 980 | + // with a single thread in the pool the max number of slices will always be 1, hence we won't provide the executor to the searcher |
| 981 | + int executorPoolSize = 1; |
| 982 | + int numIters = randomIntBetween(10, 50); |
| 983 | + int numSegmentTasks = randomIntBetween(50, 100); |
| 984 | + AtomicInteger completedTasks = new AtomicInteger(0); |
| 985 | + ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(executorPoolSize); |
| 986 | + try { |
| 987 | + doTestSearchConcurrency(executor, numIters, numSegmentTasks, completedTasks); |
| 988 | + } finally { |
| 989 | + terminate(executor); |
| 990 | + } |
| 991 | + // Tasks are still created, but the internal executor is a direct one, hence there is no parallelism in practice |
| 992 | + assertEquals((long) numIters * numSegmentTasks + numIters, completedTasks.get()); |
| 993 | + assertEquals(numIters, executor.getCompletedTaskCount()); |
| 994 | + } |
| 995 | + |
| 996 | + @SuppressForbidden(reason = "need to provide queue to ThreadPoolExecutor") |
| 997 | + public void testNoSearchConcurrencyWhenQueueing() throws IOException, ExecutionException, InterruptedException { |
| 998 | + // with multiple threads, but constant queueing, the max number of slices will always be 1, hence we won't provide the |
| 999 | + // executor to the searcher |
| 1000 | + int executorPoolSize = randomIntBetween(2, 5); |
| 1001 | + int numIters = randomIntBetween(10, 50); |
| 1002 | + int numSegmentTasks = randomIntBetween(50, 100); |
| 1003 | + AtomicInteger completedTasks = new AtomicInteger(0); |
| 1004 | + final AtomicBoolean terminating = new AtomicBoolean(false); |
| 1005 | + LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<>() { |
| 1006 | + @Override |
| 1007 | + public int size() { |
| 1008 | + // for the purpose of this test we pretend that we always have more items in the queue than threads, but we need to revert |
| 1009 | + // to normal behaviour to ensure graceful shutdown |
| 1010 | + if (terminating.get()) { |
| 1011 | + return super.size(); |
| 1012 | + } |
| 1013 | + return randomIntBetween(executorPoolSize + 1, Integer.MAX_VALUE); |
| 1014 | + } |
| 1015 | + }; |
| 1016 | + ThreadPoolExecutor executor = new ThreadPoolExecutor(executorPoolSize, executorPoolSize, 0L, TimeUnit.MILLISECONDS, queue); |
| 1017 | + try { |
| 1018 | + doTestSearchConcurrency(executor, numIters, numSegmentTasks, completedTasks); |
| 1019 | + terminating.set(true); |
| 1020 | + } finally { |
| 1021 | + terminate(executor); |
| 1022 | + } |
| 1023 | + // Tasks are still created, but the internal executor is a direct one, hence there is no parallelism in practice |
| 1024 | + assertEquals((long) numIters * numSegmentTasks + numIters, completedTasks.get()); |
| 1025 | + assertEquals(numIters, executor.getCompletedTaskCount()); |
| 1026 | + } |
| 1027 | + |
| 1028 | + @SuppressForbidden(reason = "need to provide queue to ThreadPoolExecutor") |
| 1029 | + public void testSearchConcurrencyDoesNotCreateMoreTasksThanThreads() throws Exception { |
| 1030 | + // with multiple threads, but not enough queueing to disable parallelism, we will provide the executor to the searcher |
| 1031 | + int executorPoolSize = randomIntBetween(2, 5); |
| 1032 | + int numIters = randomIntBetween(10, 50); |
| 1033 | + int numSegmentTasks = randomIntBetween(50, 100); |
| 1034 | + AtomicInteger completedTasks = new AtomicInteger(0); |
| 1035 | + final AtomicBoolean terminating = new AtomicBoolean(false); |
| 1036 | + LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<>() { |
| 1037 | + @Override |
| 1038 | + public int size() { |
| 1039 | + int size = super.size(); |
| 1040 | + // for the purpose of this test we pretend that we only ever have as many items in the queue as number of threads, but we |
| 1041 | + // need to revert to normal behaviour to ensure graceful shutdown |
| 1042 | + if (size <= executorPoolSize || terminating.get()) { |
| 1043 | + return size; |
| 1044 | + } |
| 1045 | + return randomIntBetween(0, executorPoolSize); |
| 1046 | + } |
| 1047 | + }; |
| 1048 | + ThreadPoolExecutor executor = new ThreadPoolExecutor(executorPoolSize, executorPoolSize, 0L, TimeUnit.MILLISECONDS, queue); |
| 1049 | + try { |
| 1050 | + doTestSearchConcurrency(executor, numIters, numSegmentTasks, completedTasks); |
| 1051 | + terminating.set(true); |
| 1052 | + } finally { |
| 1053 | + terminate(executor); |
| 1054 | + } |
| 1055 | + // make sure that we do parallelize execution: each operation will use at least as many tasks as there are available threads |
| 1056 | + assertThat(executor.getCompletedTaskCount(), greaterThanOrEqualTo((long) numIters * executorPoolSize)); |
| 1057 | + // while we parallelize we also limit the number of tasks that each searcher submits |
| 1058 | + assertThat(executor.getCompletedTaskCount(), lessThan((long) numIters * numSegmentTasks)); |
| 1059 | + // *3 is just a wild guess to account for tasks that get executed while we are still submitting |
| 1060 | + assertThat(executor.getCompletedTaskCount(), lessThan((long) numIters * executorPoolSize * 3)); |
| 1061 | + } |
| 1062 | + |
| 1063 | + private void doTestSearchConcurrency(ThreadPoolExecutor executor, int numIters, int numSegmentTasks, AtomicInteger completedTasks) |
| 1064 | + throws IOException, ExecutionException, InterruptedException { |
| 1065 | + DefaultSearchContext[] contexts = new DefaultSearchContext[numIters]; |
| 1066 | + for (int i = 0; i < numIters; i++) { |
| 1067 | + contexts[i] = createDefaultSearchContext(executor, randomFrom(SearchService.ResultsType.DFS, SearchService.ResultsType.QUERY)); |
| 1068 | + } |
| 1069 | + List<Future<?>> futures = new ArrayList<>(numIters); |
| 1070 | + try { |
| 1071 | + for (int i = 0; i < numIters; i++) { |
| 1072 | + // simulate multiple concurrent search operations that each parallelize their execution across many segment-level tasks |
| 1073 | + // via Lucene's TaskExecutor. Segment-level tasks are never rejected (they execute on the caller upon rejection), but |
| 1074 | + // the top-level execute call is subject to rejection once the queue is filled with segment-level tasks. That is why |
| 1075 | + // we want to limit the number of tasks that each search parallelizes into. |
| 1076 | + // NOTE: DefaultSearchContext does not provide the executor to the searcher once it sees maxPoolSize items in the queue. |
| 1077 | + DefaultSearchContext searchContext = contexts[i]; |
| 1078 | + AtomicInteger segmentTasksCompleted = new AtomicInteger(0); |
| 1079 | + RunnableFuture<Void> task = new FutureTask<>(() -> { |
| 1080 | + Collection<Callable<Void>> tasks = new ArrayList<>(); |
| 1081 | + for (int j = 0; j < numSegmentTasks; j++) { |
| 1082 | + tasks.add(() -> { |
| 1083 | + segmentTasksCompleted.incrementAndGet(); |
| 1084 | + completedTasks.incrementAndGet(); |
| 1085 | + return null; |
| 1086 | + }); |
| 1087 | + } |
| 1088 | + try { |
| 1089 | + searchContext.searcher().getTaskExecutor().invokeAll(tasks); |
| 1090 | + // TODO additional calls to invokeAll |
| 1091 | + |
| 1092 | + // invokeAll is blocking, hence at this point we are done executing all the sub-tasks, but the queue may |
| 1093 | + // still be filled up with no-op leftover tasks |
| 1094 | + assertEquals(numSegmentTasks, segmentTasksCompleted.get()); |
| 1095 | + } catch (IOException e) { |
| 1096 | + throw new UncheckedIOException(e); |
| 1097 | + } finally { |
| 1098 | + completedTasks.incrementAndGet(); |
| 1099 | + } |
| 1100 | + return null; |
| 1101 | + }); |
| 1102 | + futures.add(task); |
| 1103 | + executor.execute(task); |
| 1104 | + } |
| 1105 | + for (Future<?> future : futures) { |
| 1106 | + future.get(); |
| 1107 | + } |
| 1108 | + } finally { |
| 1109 | + for (DefaultSearchContext searchContext : contexts) { |
| 1110 | + searchContext.indexShard().getThreadPool().shutdown(); |
| 1111 | + searchContext.close(); |
| 1112 | + } |
| 1113 | + } |
| 1114 | + } |
| 1115 | + |
| 1116 | + private DefaultSearchContext createDefaultSearchContext(Executor executor, SearchService.ResultsType resultsType) throws IOException { |
| 1117 | + return createDefaultSearchContext(Settings.EMPTY, null, executor, resultsType); |
| 1118 | + } |
| 1119 | + |
962 | 1120 | private DefaultSearchContext createDefaultSearchContext(Settings providedIndexSettings) throws IOException { |
963 | 1121 | return createDefaultSearchContext(providedIndexSettings, null); |
964 | 1122 | } |
965 | 1123 |
|
966 | 1124 | private DefaultSearchContext createDefaultSearchContext(Settings providedIndexSettings, XContentBuilder mappings) throws IOException { |
| 1125 | + return createDefaultSearchContext(providedIndexSettings, mappings, null, randomFrom(SearchService.ResultsType.values())); |
| 1126 | + } |
| 1127 | + |
| 1128 | + private DefaultSearchContext createDefaultSearchContext( |
| 1129 | + Settings providedIndexSettings, |
| 1130 | + XContentBuilder mappings, |
| 1131 | + Executor executor, |
| 1132 | + SearchService.ResultsType resultsType |
| 1133 | + ) throws IOException { |
967 | 1134 | TimeValue timeout = new TimeValue(randomIntBetween(1, 100)); |
968 | 1135 | ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class); |
969 | 1136 | when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT); |
@@ -1047,9 +1214,9 @@ protected Engine.Searcher acquireSearcherInternal(String source) { |
1047 | 1214 | timeout, |
1048 | 1215 | null, |
1049 | 1216 | false, |
1050 | | - null, |
1051 | | - randomFrom(SearchService.ResultsType.values()), |
1052 | | - randomBoolean(), |
| 1217 | + executor, |
| 1218 | + resultsType, |
| 1219 | + executor != null || randomBoolean(), |
1053 | 1220 | randomInt() |
1054 | 1221 | ); |
1055 | 1222 | } |
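
The three new tests above all exercise the same decision: whether DefaultSearchContext hands the executor to the searcher at all. Below is a minimal sketch of that decision, assuming a hypothetical helper named pickSearchExecutor and a queue-size threshold equal to the pool size; the actual implementation is not part of this diff.

    private static Executor pickSearchExecutor(ThreadPoolExecutor executor) {
        // hypothetical helper for illustration only: not the actual DefaultSearchContext code
        if (executor == null || executor.getMaximumPoolSize() <= 1) {
            // a single thread can only ever run one slice, so there is nothing to parallelize
            return null;
        }
        if (executor.getQueue().size() >= executor.getMaximumPoolSize()) {
            // the pool is already backed up; submitting slice tasks would only add queueing
            return null;
        }
        // enough headroom: let the searcher fan out across slices
        return executor;
    }

When such a check returns null, the searcher falls back to the direct, single-slice path, which is what the first two tests assert via completedTasks and getCompletedTaskCount; the third test asserts that, with headroom, each search submits roughly poolSize tasks rather than one per segment.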
|