 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.ChannelActionListener;
 import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.support.RefCountingRunnable;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.util.concurrent.CountDown;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.logging.LogManager;
+import org.elasticsearch.logging.Logger;
 import org.elasticsearch.search.SearchService;
 import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.tasks.Task;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.stream.Collectors;

 public class TransportFieldCapabilitiesAction extends HandledTransportAction<FieldCapabilitiesRequest, FieldCapabilitiesResponse> {
     public static final String ACTION_NODE_NAME = FieldCapabilitiesAction.NAME + "[n]";
+    public static final Logger LOGGER = LogManager.getLogger(TransportFieldCapabilitiesAction.class);

     private final ThreadPool threadPool;
     private final TransportService transportService;
@@ -116,61 +121,99 @@ protected void doExecute(Task task, FieldCapabilitiesRequest request, final Acti
         }

         checkIndexBlocks(clusterState, concreteIndices);
-
+        final FailureCollector indexFailures = new FailureCollector();
         final Map<String, FieldCapabilitiesIndexResponse> indexResponses = Collections.synchronizedMap(new HashMap<>());
         // This map is used to share the index response for indices which have the same index mapping hash to reduce the memory usage.
         final Map<String, FieldCapabilitiesIndexResponse> indexMappingHashToResponses = Collections.synchronizedMap(new HashMap<>());
+        final Runnable releaseResourcesOnCancel = () -> {
+            LOGGER.trace("clear index responses on cancellation");
+            indexFailures.clear();
+            indexResponses.clear();
+            indexMappingHashToResponses.clear();
+        };
         final Consumer<FieldCapabilitiesIndexResponse> handleIndexResponse = resp -> {
+            if (fieldCapTask.isCancelled()) {
+                releaseResourcesOnCancel.run();
+                return;
+            }
             if (resp.canMatch() && resp.getIndexMappingHash() != null) {
                 FieldCapabilitiesIndexResponse curr = indexMappingHashToResponses.putIfAbsent(resp.getIndexMappingHash(), resp);
                 if (curr != null) {
                     resp = new FieldCapabilitiesIndexResponse(resp.getIndexName(), curr.getIndexMappingHash(), curr.get(), true);
                 }
             }
             indexResponses.putIfAbsent(resp.getIndexName(), resp);
+            if (fieldCapTask.isCancelled()) {
+                releaseResourcesOnCancel.run();
+            }
         };
-        final FailureCollector indexFailures = new FailureCollector();
-        // One for each cluster including the local cluster
-        final CountDown completionCounter = new CountDown(1 + remoteClusterIndices.size());
-        final Runnable countDown = createResponseMerger(request, fieldCapTask, completionCounter, indexResponses, indexFailures, listener);
-        final RequestDispatcher requestDispatcher = new RequestDispatcher(
-            clusterService,
-            transportService,
-            task,
-            request,
-            localIndices,
-            nowInMillis,
-            concreteIndices,
-            threadPool.executor(ThreadPool.Names.SEARCH_COORDINATION),
-            handleIndexResponse,
-            indexFailures::collect,
-            countDown
-        );
-        requestDispatcher.execute();
+        final BiConsumer<String, Exception> handleIndexFailure = (index, error) -> {
+            if (fieldCapTask.isCancelled()) {
+                releaseResourcesOnCancel.run();
+                return;
+            }
+            indexFailures.collect(index, error);
+            if (fieldCapTask.isCancelled()) {
+                releaseResourcesOnCancel.run();
+            }
+        };
+        final var finishedOrCancelled = new AtomicBoolean();
+        fieldCapTask.addListener(() -> {
+            if (finishedOrCancelled.compareAndSet(false, true)) {
+                releaseResourcesOnCancel.run();
+            }
+        });
+        try (RefCountingRunnable refs = new RefCountingRunnable(() -> {
+            finishedOrCancelled.set(true);
+            if (fieldCapTask.notifyIfCancelled(listener)) {
+                releaseResourcesOnCancel.run();
+            } else {
+                mergeIndexResponses(request, fieldCapTask, indexResponses, indexFailures, listener);
+            }
+        })) {
+            // local cluster
+            final RequestDispatcher requestDispatcher = new RequestDispatcher(
+                clusterService,
+                transportService,
+                task,
+                request,
+                localIndices,
+                nowInMillis,
+                concreteIndices,
+                threadPool.executor(ThreadPool.Names.SEARCH_COORDINATION),
+                handleIndexResponse,
+                handleIndexFailure,
+                refs.acquire()::close
+            );
+            requestDispatcher.execute();

-        // this is the cross cluster part of this API - we force the other cluster to not merge the results but instead
-        // send us back all individual index results.
-        for (Map.Entry<String, OriginalIndices> remoteIndices : remoteClusterIndices.entrySet()) {
-            String clusterAlias = remoteIndices.getKey();
-            OriginalIndices originalIndices = remoteIndices.getValue();
-            Client remoteClusterClient = transportService.getRemoteClusterService().getRemoteClusterClient(threadPool, clusterAlias);
-            FieldCapabilitiesRequest remoteRequest = prepareRemoteRequest(request, originalIndices, nowInMillis);
-            remoteClusterClient.fieldCaps(remoteRequest, ActionListener.wrap(response -> {
-                for (FieldCapabilitiesIndexResponse resp : response.getIndexResponses()) {
-                    String indexName = RemoteClusterAware.buildRemoteIndexName(clusterAlias, resp.getIndexName());
-                    handleIndexResponse.accept(
-                        new FieldCapabilitiesIndexResponse(indexName, resp.getIndexMappingHash(), resp.get(), resp.canMatch())
-                    );
-                }
-                for (FieldCapabilitiesFailure failure : response.getFailures()) {
-                    Exception ex = failure.getException();
-                    indexFailures.collectRemoteException(ex, clusterAlias, failure.getIndices());
-                }
-                countDown.run();
-            }, ex -> {
-                indexFailures.collectRemoteException(ex, clusterAlias, originalIndices.indices());
-                countDown.run();
-            }));
+            // this is the cross cluster part of this API - we force the other cluster to not merge the results but instead
+            // send us back all individual index results.
+            for (Map.Entry<String, OriginalIndices> remoteIndices : remoteClusterIndices.entrySet()) {
+                String clusterAlias = remoteIndices.getKey();
+                OriginalIndices originalIndices = remoteIndices.getValue();
+                Client remoteClusterClient = transportService.getRemoteClusterService().getRemoteClusterClient(threadPool, clusterAlias);
+                FieldCapabilitiesRequest remoteRequest = prepareRemoteRequest(request, originalIndices, nowInMillis);
+                ActionListener<FieldCapabilitiesResponse> remoteListener = ActionListener.wrap(response -> {
+                    for (FieldCapabilitiesIndexResponse resp : response.getIndexResponses()) {
+                        String indexName = RemoteClusterAware.buildRemoteIndexName(clusterAlias, resp.getIndexName());
+                        handleIndexResponse.accept(
+                            new FieldCapabilitiesIndexResponse(indexName, resp.getIndexMappingHash(), resp.get(), resp.canMatch())
+                        );
+                    }
+                    for (FieldCapabilitiesFailure failure : response.getFailures()) {
+                        Exception ex = failure.getException();
+                        for (String index : failure.getIndices()) {
+                            handleIndexFailure.accept(RemoteClusterAware.buildRemoteIndexName(clusterAlias, index), ex);
+                        }
+                    }
+                }, ex -> {
+                    for (String index : originalIndices.indices()) {
+                        handleIndexFailure.accept(RemoteClusterAware.buildRemoteIndexName(clusterAlias, index), ex);
+                    }
+                });
+                remoteClusterClient.fieldCaps(remoteRequest, ActionListener.releaseAfter(remoteListener, refs.acquire()));
+            }
         }
     }

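The hunk above replaces the CountDown-based response merger with a RefCountingRunnable: the try-with-resources block holds one reference, the local RequestDispatcher and every remote fieldCaps call acquire another, and the completion runnable (which either notifies cancellation or merges the index responses) runs exactly once after the last reference is released. The following is a minimal sketch of that completion pattern, using only the RefCountingRunnable API visible in this diff (a constructor taking a Runnable and acquire() returning a closeable reference); the dispatchAll helper, the task list, and the CompletableFuture executor are illustrative placeholders rather than part of this change.

import org.elasticsearch.action.support.RefCountingRunnable;

import java.util.List;
import java.util.concurrent.CompletableFuture;

class RefCountingSketch {
    // Runs onAllComplete exactly once, after every async task has finished,
    // which is the same bookkeeping doExecute now uses to schedule the response merge.
    static void dispatchAll(List<Runnable> asyncTasks, Runnable onAllComplete) {
        try (RefCountingRunnable refs = new RefCountingRunnable(onAllComplete)) {
            for (Runnable task : asyncTasks) {
                var ref = refs.acquire(); // one reference per in-flight task
                CompletableFuture.runAsync(() -> {
                    try {
                        task.run();
                    } finally {
                        ref.close(); // closing the last outstanding reference triggers onAllComplete
                    }
                });
            }
        } // the initial reference held by try-with-resources is released here
    }
}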
@@ -181,39 +224,32 @@ private static void checkIndexBlocks(ClusterState clusterState, String[] concret
         }
     }

-    private Runnable createResponseMerger(
+    private void mergeIndexResponses(
         FieldCapabilitiesRequest request,
         CancellableTask task,
-        CountDown completionCounter,
         Map<String, FieldCapabilitiesIndexResponse> indexResponses,
         FailureCollector indexFailures,
         ActionListener<FieldCapabilitiesResponse> listener
     ) {
-        return () -> {
-            if (completionCounter.countDown()) {
-                List<FieldCapabilitiesFailure> failures = indexFailures.build(indexResponses.keySet());
-                if (indexResponses.size() > 0) {
-                    if (request.isMergeResults()) {
-                        // fork off to the management pool for merging the responses as the operation can run for longer than is acceptable
-                        // on a transport thread in case of large numbers of indices and/or fields
-                        threadPool.executor(ThreadPool.Names.SEARCH_COORDINATION)
-                            .submit(ActionRunnable.supply(listener, () -> merge(indexResponses, task, request, new ArrayList<>(failures))));
-                    } else {
-                        listener.onResponse(
-                            new FieldCapabilitiesResponse(new ArrayList<>(indexResponses.values()), new ArrayList<>(failures))
-                        );
-                    }
-                } else {
-                    // we have no responses at all, maybe because of errors
-                    if (indexFailures.isEmpty() == false) {
-                        // throw back the first exception
-                        listener.onFailure(failures.iterator().next().getException());
-                    } else {
-                        listener.onResponse(new FieldCapabilitiesResponse(Collections.emptyList(), Collections.emptyList()));
-                    }
-                }
+        List<FieldCapabilitiesFailure> failures = indexFailures.build(indexResponses.keySet());
+        if (indexResponses.size() > 0) {
+            if (request.isMergeResults()) {
+                // fork off to the management pool for merging the responses as the operation can run for longer than is acceptable
+                // on a transport thread in case of large numbers of indices and/or fields
+                threadPool.executor(ThreadPool.Names.SEARCH_COORDINATION)
+                    .submit(ActionRunnable.supply(listener, () -> merge(indexResponses, task, request, new ArrayList<>(failures))));
+            } else {
+                listener.onResponse(new FieldCapabilitiesResponse(new ArrayList<>(indexResponses.values()), new ArrayList<>(failures)));
             }
-        };
+        } else {
+            // we have no responses at all, maybe because of errors
+            if (indexFailures.isEmpty() == false) {
+                // throw back the first exception
+                listener.onFailure(failures.iterator().next().getException());
+            } else {
+                listener.onResponse(new FieldCapabilitiesResponse(Collections.emptyList(), Collections.emptyList()));
+            }
+        }
     }

     private static FieldCapabilitiesRequest prepareRemoteRequest(
@@ -380,10 +416,8 @@ void collect(String index, Exception e) {
             failuresByIndex.putIfAbsent(index, e);
         }

-        void collectRemoteException(Exception ex, String clusterAlias, String[] remoteIndices) {
-            for (String failedIndex : remoteIndices) {
-                collect(RemoteClusterAware.buildRemoteIndexName(clusterAlias, failedIndex), ex);
-            }
+        void clear() {
+            failuresByIndex.clear();
         }

         boolean isEmpty() {
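The clear() method added to FailureCollector above is one of the hooks used by the cancellation cleanup in doExecute, where a task cancellation listener and an AtomicBoolean guard ensure the buffered responses and failures are released at most once, whether the task is cancelled mid-flight or finishes normally. The stand-alone sketch below illustrates that run-once guard with plain JDK types; CancellationSource, collectWithCleanup, and the accumulation placeholder are hypothetical stand-ins, not Elasticsearch APIs.

import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

class CancellationCleanupSketch {
    // Hypothetical stand-in for the task's addListener hook: calls the callback on cancellation.
    interface CancellationSource {
        void addListener(Runnable onCancelled);
    }

    static void collectWithCleanup(CancellationSource task, List<String> buffered, Runnable onFinished) {
        AtomicBoolean finishedOrCancelled = new AtomicBoolean();
        Runnable releaseOnCancel = buffered::clear;

        // If the task is cancelled before completion, the buffered state is cleared exactly once;
        // compareAndSet loses the race against normal completion, so finished results stay intact.
        task.addListener(() -> {
            if (finishedOrCancelled.compareAndSet(false, true)) {
                releaseOnCancel.run();
            }
        });

        // ... dispatched requests would append their per-index results to `buffered` here ...

        // Normal completion: mark finished first so a late cancellation callback no longer clears anything.
        finishedOrCancelled.set(true);
        onFinished.run();
    }
}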