@@ -13,9 +13,8 @@
 import org.apache.lucene.search.ScoreMode;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.action.bulk.BackoffPolicy;
 import org.elasticsearch.action.bulk.BulkItemResponse;
-import org.elasticsearch.action.bulk.BulkProcessor;
+import org.elasticsearch.action.bulk.BulkProcessor2;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.index.IndexRequestBuilder;
@@ -75,6 +74,7 @@ class RollupShardIndexer {
     private static final Logger logger = LogManager.getLogger(RollupShardIndexer.class);
     public static final int ROLLUP_BULK_ACTIONS = 10000;
     public static final ByteSizeValue ROLLUP_BULK_SIZE = new ByteSizeValue(1, ByteSizeUnit.MB);
+    public static final ByteSizeValue ROLLUP_MAX_BYTES_IN_FLIGHT = new ByteSizeValue(50, ByteSizeUnit.MB);
 
     private final IndexShard indexShard;
     private final Client client;
@@ -87,6 +87,8 @@ class RollupShardIndexer {
     private final List<FieldValueFetcher> fieldValueFetchers;
     private final RollupShardTask task;
     private volatile boolean abort = false;
+    ByteSizeValue rollupBulkSize = ROLLUP_BULK_SIZE;
+    ByteSizeValue rollupMaxBytesInFlight = ROLLUP_MAX_BYTES_IN_FLIGHT;
 
     RollupShardIndexer(
         RollupShardTask task,
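The two new package-private fields mirror the constants above; since they are not final, the likely intent is to let tests shrink the limits without changing the production defaults. A hypothetical override from a test in the same package (the helper and the chosen sizes are illustrative, not part of this change):

    // Hypothetical: force tiny limits so a test can exercise backpressure cheaply.
    RollupShardIndexer indexer = newIndexerForTest();  // illustrative helper, not in this diff
    indexer.rollupBulkSize = new ByteSizeValue(1, ByteSizeUnit.KB);
    indexer.rollupMaxBytesInFlight = new ByteSizeValue(1, ByteSizeUnit.KB);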
@@ -129,7 +131,7 @@ class RollupShardIndexer {
 
     public DownsampleIndexerAction.ShardDownsampleResponse execute() throws IOException {
         long startTime = System.currentTimeMillis();
-        BulkProcessor bulkProcessor = createBulkProcessor();
+        BulkProcessor2 bulkProcessor = createBulkProcessor();
         try (searcher; bulkProcessor) {
             final TimeSeriesIndexSearcher timeSeriesSearcher = new TimeSeriesIndexSearcher(searcher, List.of(this::checkCancelled));
             TimeSeriesBucketCollector bucketCollector = new TimeSeriesBucketCollector(bulkProcessor);
@@ -160,6 +162,18 @@ public DownsampleIndexerAction.ShardDownsampleResponse execute() throws IOException {
             );
         }
 
+        if (task.getNumFailed() > 0) {
+            throw new ElasticsearchException(
+                "Shard ["
+                    + indexShard.shardId()
+                    + "] failed to index all rollup documents. Sent ["
+                    + task.getNumSent()
+                    + "], failed ["
+                    + task.getNumFailed()
+                    + "]."
+            );
+        }
+
         return new DownsampleIndexerAction.ShardDownsampleResponse(indexShard.shardId(), task.getNumIndexed());
     }
 
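Since retries are now handled inside the bulk processor (see the builder below), any failures still counted once indexing finishes are permanent, so the shard indexer surfaces them as a hard error instead of silently returning a partial rollup. A condensed sketch of where the guard's inputs come from, per the listener further down:

    // numSent  is bumped in beforeBulk() with request.numberOfActions();
    // numFailed is bumped in the afterBulk() overloads on item or request failure.
    if (task.getNumFailed() > 0) {
        throw new ElasticsearchException("Shard [" + indexShard.shardId() + "] failed to index all rollup documents.");
    }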
@@ -176,8 +190,8 @@ private void checkCancelled() {
         }
     }
 
-    private BulkProcessor createBulkProcessor() {
-        final BulkProcessor.Listener listener = new BulkProcessor.Listener() {
+    private BulkProcessor2 createBulkProcessor() {
+        final BulkProcessor2.Listener listener = new BulkProcessor2.Listener() {
             @Override
             public void beforeBulk(long executionId, BulkRequest request) {
                 task.addNumSent(request.numberOfActions());
@@ -206,7 +220,7 @@ public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
             }
 
             @Override
-            public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
+            public void afterBulk(long executionId, BulkRequest request, Exception failure) {
                 if (failure != null) {
                     long items = request.numberOfActions();
                     task.addNumFailed(items);
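The only signature change in the listener is the terminal-failure callback, which BulkProcessor2.Listener types as Exception rather than Throwable. Assembled from the two hunks above, the listener now has roughly this shape (the body of the response-handling overload is unchanged by this diff and elided here):

    final BulkProcessor2.Listener listener = new BulkProcessor2.Listener() {
        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            // count every item handed to the processor
            task.addNumSent(request.numberOfActions());
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            // per-item success/failure accounting, unchanged and not shown in this diff
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Exception failure) {
            if (failure != null) {
                // the whole request failed: count all of its items as failed
                task.addNumFailed(request.numberOfActions());
            }
        }
    };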
@@ -218,24 +232,23 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
             }
         };
 
-        return BulkProcessor.builder(client::bulk, listener, "rollup-shard-indexer")
+        return BulkProcessor2.builder(client::bulk, listener, client.threadPool())
             .setBulkActions(ROLLUP_BULK_ACTIONS)
             .setBulkSize(ROLLUP_BULK_SIZE)
-            // execute the bulk request on the same thread
-            .setConcurrentRequests(0)
-            .setBackoffPolicy(BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(1000), 3))
+            .setMaxBytesInFlight(rollupMaxBytesInFlight)
+            .setMaxNumberOfRetries(3)
             .build();
     }
 
     private class TimeSeriesBucketCollector extends BucketCollector {
-        private final BulkProcessor bulkProcessor;
+        private final BulkProcessor2 bulkProcessor;
         private final RollupBucketBuilder rollupBucketBuilder;
         private long docsProcessed;
         private long bucketsCreated;
         long lastTimestamp = Long.MAX_VALUE;
         long lastHistoTimestamp = Long.MAX_VALUE;
 
-        TimeSeriesBucketCollector(BulkProcessor bulkProcessor) {
+        TimeSeriesBucketCollector(BulkProcessor2 bulkProcessor) {
             this.bulkProcessor = bulkProcessor;
             List<AbstractDownsampleFieldProducer> rollupFieldProducers = fieldValueFetchers.stream()
                 .map(FieldValueFetcher::rollupFieldProducer)
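The builder swap is the heart of the change: instead of pinning concurrency to zero and attaching an explicit BackoffPolicy, BulkProcessor2 takes a ThreadPool, presumably so the processor can schedule its own flushes and retries, and it exposes retry count and memory ceiling as first-class settings. A minimal side-by-side sketch using only the setters visible in this diff:

    // Before: synchronous dispatch plus an explicit exponential backoff.
    BulkProcessor before = BulkProcessor.builder(client::bulk, listener, "rollup-shard-indexer")
        .setBulkActions(ROLLUP_BULK_ACTIONS)
        .setBulkSize(ROLLUP_BULK_SIZE)
        .setConcurrentRequests(0)
        .setBackoffPolicy(BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(1000), 3))
        .build();

    // After: retries and total in-flight bytes are configured directly;
    // scheduling goes through the client's thread pool.
    BulkProcessor2 after = BulkProcessor2.builder(client::bulk, listener, client.threadPool())
        .setBulkActions(ROLLUP_BULK_ACTIONS)
        .setBulkSize(ROLLUP_BULK_SIZE)
        .setMaxBytesInFlight(rollupMaxBytesInFlight)
        .setMaxNumberOfRetries(3)
        .build();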
@@ -336,7 +349,7 @@ private void indexBucket(XContentBuilder doc) {
             if (logger.isTraceEnabled()) {
                 logger.trace("Indexing rollup doc: [{}]", Strings.toString(doc));
             }
-            bulkProcessor.add(request.request());
+            bulkProcessor.addWithBackpressure(request.request(), () -> abort);
         }
 
         @Override
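Unlike add(), which would have to reject work once the in-flight byte limit is reached, addWithBackpressure() blocks the indexing thread until the processor has capacity again, consulting the supplied predicate so a cancelled shard task can escape the wait; here that escape hatch is wired to the indexer's existing abort flag. Assuming that contract, usage stays a one-liner:

    // Blocks while too many bytes are in flight; stops waiting once `abort` flips to true.
    bulkProcessor.addWithBackpressure(request.request(), () -> abort);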
@@ -352,7 +365,6 @@ public void postCollection() throws IOException {
                 XContentBuilder doc = rollupBucketBuilder.buildRollupDocument();
                 indexBucket(doc);
             }
-            bulkProcessor.flush();
 
             // check for cancellation after all data has been flushed
             checkCancelled();
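The explicit flush() can go because execute() already closes the processor through try-with-resources, and closing a BulkProcessor2 is expected to flush whatever is buffered and wait for outstanding bulks before returning; that is an inference from this diff rather than a verified contract. The ownership pattern, reduced to its shape:

    // Sketch: the try-with-resources in execute() now performs the terminal flush.
    try (searcher; bulkProcessor) {
        // collect buckets; indexBucket() feeds the processor with backpressure
    } // close() flushes buffered requests and awaits in-flight ones (assumed behavior)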