Commit 58d80f3

Improve DownsampleActionSingleNodeTests testing (#124550)
Index the test data set in multiple bulk requests, which ensures there are multiple segments and improves test coverage for DownsampleShardIndexer. We don't catch more complex DownsampleShardIndexer issues if we end up with just one segment, so we need to make the DownsampleShardIndexer test suite a little bit more evil by ensuring there is more than one segment.
1 parent 8da3bea commit 58d80f3

File tree

1 file changed (+24, -18 lines)


x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java

Lines changed: 24 additions & 18 deletions
@@ -1120,25 +1120,31 @@ private void bulkIndex(SourceSupplier sourceSupplier) throws IOException {
     }
 
     private void bulkIndex(final String indexName, final SourceSupplier sourceSupplier, int docCount) throws IOException {
-        BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
-        bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
-        for (int i = 0; i < docCount; i++) {
-            IndexRequest indexRequest = new IndexRequest(indexName).opType(DocWriteRequest.OpType.CREATE);
-            XContentBuilder source = sourceSupplier.get();
-            indexRequest.source(source);
-            bulkRequestBuilder.add(indexRequest);
-        }
-        BulkResponse bulkResponse = bulkRequestBuilder.get();
+        // Index in such a way that we always have multiple segments, so that we test DownsampleShardIndexer in a more realistic scenario:
+        // (also makes failures more reproducible)
         int duplicates = 0;
-        for (BulkItemResponse response : bulkResponse.getItems()) {
-            if (response.isFailed()) {
-                if (response.getFailure().getCause() instanceof VersionConflictEngineException) {
-                    // A duplicate event was created by random generator. We should not fail for this
-                    // reason.
-                    logger.debug("We tried to insert a duplicate: [{}]", response.getFailureMessage());
-                    duplicates++;
-                } else {
-                    fail("Failed to index data: " + bulkResponse.buildFailureMessage());
+        for (int i = 0; i < docCount;) {
+            BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
+            bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+            int max = Math.min(i + 100, docCount);
+            for (int j = i; j < max; j++) {
+                IndexRequest indexRequest = new IndexRequest(indexName).opType(DocWriteRequest.OpType.CREATE);
+                XContentBuilder source = sourceSupplier.get();
+                indexRequest.source(source);
+                bulkRequestBuilder.add(indexRequest);
+            }
+            i = max;
+            BulkResponse bulkResponse = bulkRequestBuilder.get();
+            for (BulkItemResponse response : bulkResponse.getItems()) {
+                if (response.isFailed()) {
+                    if (response.getFailure().getCause() instanceof VersionConflictEngineException) {
+                        // A duplicate event was created by random generator. We should not fail for this
+                        // reason.
+                        logger.debug("We tried to insert a duplicate: [{}]", response.getFailureMessage());
+                        duplicates++;
+                    } else {
+                        fail("Failed to index data: " + bulkResponse.buildFailureMessage());
+                    }
                 }
             }
         }
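The premise behind the change is that every bulk request, refreshed immediately, leaves at least one additional segment behind until a merge collapses them. As a purely illustrative, hedged sketch (not part of this commit), a test could double-check that premise through the indices segments admin API; the helper name and the exact response accessors below are assumptions and may differ between Elasticsearch versions:

import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
import org.elasticsearch.action.admin.indices.segments.ShardSegments;

// Hypothetical helper for this test class; assumes client() from the single-node test base class
// and that the segments response types expose the iterators and getters used here.
private int countSegments(final String indexName) {
    IndicesSegmentResponse segmentsResponse = client().admin().indices().prepareSegments(indexName).get();
    int segmentCount = 0;
    for (IndexShardSegments indexShardSegments : segmentsResponse.getIndices().get(indexName)) {
        for (ShardSegments shardSegments : indexShardSegments) {
            segmentCount += shardSegments.getSegments().size();
        }
    }
    return segmentCount;
}

With the batched bulkIndex above, any docCount greater than 100 should then satisfy something like assertThat(countSegments(indexName), greaterThan(1)), whereas the old single-request version could legitimately end up with just one segment.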
