
Commit 646bdbe

1 parent 9946884 commit 646bdbe

File tree

3 files changed: +218 -168 lines changed
server/src/main/java/org/elasticsearch/action/bulk/ShardBulkSplitHelper.java (new file)

Lines changed: 104 additions & 0 deletions
@@ -0,0 +1,104 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the "Elastic License
 * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
 * Public License v 1"; you may not use this file except in compliance with, at
 * your election, the "Elastic License 2.0", the "GNU Affero General Public
 * License v3.0 only", or the "Server Side Public License, v 1".
 */

package org.elasticsearch.action.bulk;

import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.cluster.metadata.ProjectMetadata;
import org.elasticsearch.cluster.routing.IndexRouting;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class ShardBulkSplitHelper {

    private ShardBulkSplitHelper() {}

    public static Map<ShardId, BulkShardRequest> splitRequests(BulkShardRequest request, ProjectMetadata project) {
        final ShardId sourceShardId = request.shardId();
        final Index index = sourceShardId.getIndex();
        IndexRouting indexRouting = IndexRouting.fromIndexMetadata(project.getIndexSafe(index));

        Map<ShardId, List<BulkItemRequest>> requestsByShard = new HashMap<>();
        Map<ShardId, BulkShardRequest> bulkRequestsPerShard = new HashMap<>();

        // Iterate through the items in the input request and split them based on the
        // current resharding-split state.
        BulkItemRequest[] items = request.items();
        if (items.length == 0) { // Nothing to split
            return Map.of(sourceShardId, request);
        }

        for (BulkItemRequest bulkItemRequest : items) {
            DocWriteRequest<?> docWriteRequest = bulkItemRequest.request();
            int newShardId = docWriteRequest.rerouteAtSourceDuringResharding(indexRouting);
            List<BulkItemRequest> shardRequests = requestsByShard.computeIfAbsent(
                new ShardId(index, newShardId),
                shardNum -> new ArrayList<>()
            );
            shardRequests.add(new BulkItemRequest(bulkItemRequest.id(), bulkItemRequest.request()));
        }

        // All items belong to either the source shard or the target shard.
        if (requestsByShard.size() == 1) {
            Map.Entry<ShardId, List<BulkItemRequest>> entry = requestsByShard.entrySet().iterator().next();
            // Return the original request if no items were split to the target.
            if (entry.getKey().equals(sourceShardId)) {
                return Map.of(sourceShardId, request);
            }
        }

        for (Map.Entry<ShardId, List<BulkItemRequest>> entry : requestsByShard.entrySet()) {
            final ShardId shardId = entry.getKey();
            final List<BulkItemRequest> requests = entry.getValue();
            BulkShardRequest bulkShardRequest = new BulkShardRequest(
                shardId,
                request.getRefreshPolicy(),
                requests.toArray(new BulkItemRequest[0]),
                request.isSimulated()
            );
            bulkRequestsPerShard.put(shardId, bulkShardRequest);
        }
        return bulkRequestsPerShard;
    }

    public static Tuple<BulkShardResponse, Exception> combineResponses(
        BulkShardRequest originalRequest,
        Map<ShardId, BulkShardRequest> splitRequests,
        Map<ShardId, Tuple<BulkShardResponse, Exception>> responses
    ) {
        BulkItemResponse[] bulkItemResponses = new BulkItemResponse[originalRequest.items().length];
        for (Map.Entry<ShardId, Tuple<BulkShardResponse, Exception>> entry : responses.entrySet()) {
            ShardId shardId = entry.getKey();
            Tuple<BulkShardResponse, Exception> value = entry.getValue();
            Exception exception = value.v2();
            if (exception != null) {
                // The whole per-shard request failed: record a failure for every item routed to that shard.
                BulkShardRequest bulkShardRequest = splitRequests.get(shardId);
                for (BulkItemRequest item : bulkShardRequest.items()) {
                    DocWriteRequest<?> request = item.request();
                    BulkItemResponse.Failure failure = new BulkItemResponse.Failure(item.index(), request.id(), exception);
                    bulkItemResponses[item.id()] = BulkItemResponse.failure(item.id(), request.opType(), failure);
                }
            } else {
                // Item ids are slots in the original request, so per-shard responses slot straight back into place.
                for (BulkItemResponse bulkItemResponse : value.v1().getResponses()) {
                    bulkItemResponses[bulkItemResponse.getItemId()] = bulkItemResponse;
                }
            }
        }
        BulkShardResponse bulkShardResponse = new BulkShardResponse(originalRequest.shardId(), bulkItemResponses);
        // TODO: Decide how to handle
        bulkShardResponse.setShardInfo(responses.get(originalRequest.shardId()).v1().getShardInfo());
        return new Tuple<>(bulkShardResponse, null);
    }
}
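
For context, the helper is consumed by TransportShardBulkAction (diff below). A minimal sketch of the intended call pattern; the per-shard dispatch between the two calls is not part of this commit, and collectResponses is a hypothetical stand-in for it:

    // Sketch only: mirrors the delegation shown in the TransportShardBulkAction diff below.
    ClusterState clusterState = clusterService.state();
    ProjectMetadata project = projectResolver.getProjectMetadata(clusterState);

    // Split the incoming shard-level bulk request across the source/target shards.
    Map<ShardId, BulkShardRequest> split = ShardBulkSplitHelper.splitRequests(request, project);

    // Hypothetical step: execute each per-shard request and collect one
    // Tuple<BulkShardResponse, Exception> per ShardId.
    Map<ShardId, Tuple<BulkShardResponse, Exception>> responses = collectResponses(split);

    // Stitch the per-shard results back into one response for the original request.
    Tuple<BulkShardResponse, Exception> combined = ShardBulkSplitHelper.combineResponses(request, split, responses);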

server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java

Lines changed: 3 additions & 73 deletions
@@ -35,7 +35,6 @@
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.metadata.ProjectMetadata;
 import org.elasticsearch.cluster.project.ProjectResolver;
-import org.elasticsearch.cluster.routing.IndexRouting;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressedXContent;
@@ -46,7 +45,6 @@
 import org.elasticsearch.core.Strings;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.core.Tuple;
-import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexingPressure;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
@@ -74,9 +72,6 @@
 import org.elasticsearch.xcontent.XContentType;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Executor;
 import java.util.concurrent.TimeUnit;
@@ -175,56 +170,11 @@ protected void shardOperationOnPrimary(
     @Override
     protected Map<ShardId, BulkShardRequest> splitRequestOnPrimary(BulkShardRequest request) {
         // TODO Needed right now for not in primary mode on the target. Need to make sure we handle that with retries.
-        LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
-        final ShardId sourceShardId = request.shardId();
-        final Index index = sourceShardId.getIndex();
-
+        // LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
         ClusterState clusterState = clusterService.state();
         ProjectMetadata project = projectResolver.getProjectMetadata(clusterState);
 
-        IndexRouting indexRouting = IndexRouting.fromIndexMetadata(project.getIndexSafe(index));
-        Map<ShardId, List<BulkItemRequest>> requestsByShard = new HashMap<>();
-        Map<ShardId, BulkShardRequest> bulkRequestsPerShard = new HashMap<>();
-
-        // Iterate through the items in the input request and split them based on the
-        // current resharding-split state.
-        BulkItemRequest[] items = request.items();
-        if (items.length == 0) { // Nothing to split
-            return Map.of(sourceShardId, request);
-        }
-
-        for (int i = 0; i < items.length; i++) {
-            BulkItemRequest bulkItemRequest = items[i];
-            DocWriteRequest<?> docWriteRequest = bulkItemRequest.request();
-            int newShardId = docWriteRequest.rerouteAtSourceDuringResharding(indexRouting);
-            List<BulkItemRequest> shardRequests = requestsByShard.computeIfAbsent(
-                new ShardId(index, newShardId),
-                shardNum -> new ArrayList<>()
-            );
-            shardRequests.add(new BulkItemRequest(bulkItemRequest.id(), bulkItemRequest.request()));
-        }
-
-        // All items belong to either the source shard or target shard.
-        if (requestsByShard.size() == 1) {
-            Map.Entry<ShardId, List<BulkItemRequest>> entry = requestsByShard.entrySet().iterator().next();
-            // Return the original request if no items were split to target.
-            if (entry.getKey().equals(sourceShardId)) {
-                return Map.of(sourceShardId, request);
-            }
-        }
-
-        for (Map.Entry<ShardId, List<BulkItemRequest>> entry : requestsByShard.entrySet()) {
-            final ShardId shardId = entry.getKey();
-            final List<BulkItemRequest> requests = entry.getValue();
-            BulkShardRequest bulkShardRequest = new BulkShardRequest(
-                shardId,
-                request.getRefreshPolicy(),
-                requests.toArray(new BulkItemRequest[0]),
-                request.isSimulated()
-            );
-            bulkRequestsPerShard.put(shardId, bulkShardRequest);
-        }
-        return bulkRequestsPerShard;
+        return ShardBulkSplitHelper.splitRequests(request, project);
     }
 
     @Override
@@ -233,27 +183,7 @@ protected Tuple<BulkShardResponse, Exception> combineSplitResponses(
         Map<ShardId, BulkShardRequest> splitRequests,
         Map<ShardId, Tuple<BulkShardResponse, Exception>> responses
     ) {
-        BulkItemResponse[] bulkItemResponses = new BulkItemResponse[originalRequest.items().length];
-        for (Map.Entry<ShardId, Tuple<BulkShardResponse, Exception>> entry : responses.entrySet()) {
-            ShardId shardId = entry.getKey();
-            Tuple<BulkShardResponse, Exception> value = entry.getValue();
-            Exception exception = value.v2();
-            if (exception != null) {
-                BulkShardRequest bulkShardRequest = splitRequests.get(shardId);
-                for (BulkItemRequest item : bulkShardRequest.items()) {
-                    DocWriteRequest<?> request = item.request();
-                    BulkItemResponse.Failure failure = new BulkItemResponse.Failure(item.index(), request.id(), exception);
-                    bulkItemResponses[item.id()] = BulkItemResponse.failure(item.id(), request.opType(), failure);
-                }
-            } else {
-                for (BulkItemResponse bulkItemResponse : value.v1().getResponses()) {
-                    bulkItemResponses[bulkItemResponse.getItemId()] = bulkItemResponse;
-                }
-            }
-        }
-        BulkShardResponse bulkShardResponse = new BulkShardResponse(originalRequest.shardId(), bulkItemResponses);
-        bulkShardResponse.setShardInfo(responses.get(originalRequest.shardId()).v1().getShardInfo());
-        return new Tuple<>(bulkShardResponse, null);
+        return ShardBulkSplitHelper.combineResponses(originalRequest, splitRequests, responses);
     }
 
     @Override