Skip to content

Commit 04b7295

Browse files
authored
Improve DownsampleActionSingleNodeTests testing (elastic#124550) (elastic#124559)
By indexing the test data set in multiple bulk requests, which ensures there are multiple segments and improves testing coverage for DownsampleShardIndexer. We don't catch more complex DownsampleShardIndexer issues if we end up with just one segment. Hence, we need to make the DownsampleShardIndexer test suite a little bit more demanding by ensuring we have more than one segment.
1 parent 00b1373 commit 04b7295

File tree

1 file changed

+24
-18
lines changed

1 file changed

+24
-18
lines changed

x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java

Lines changed: 24 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1119,25 +1119,31 @@ private void bulkIndex(SourceSupplier sourceSupplier) throws IOException {
11191119
}
11201120

11211121
private void bulkIndex(final String indexName, final SourceSupplier sourceSupplier, int docCount) throws IOException {
1122-
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
1123-
bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
1124-
for (int i = 0; i < docCount; i++) {
1125-
IndexRequest indexRequest = new IndexRequest(indexName).opType(DocWriteRequest.OpType.CREATE);
1126-
XContentBuilder source = sourceSupplier.get();
1127-
indexRequest.source(source);
1128-
bulkRequestBuilder.add(indexRequest);
1129-
}
1130-
BulkResponse bulkResponse = bulkRequestBuilder.get();
1122+
// Index in such a way that we always have multiple segments, so that we test DownsampleShardIndexer in a more realistic scenario:
1123+
// (also makes failures more reproducible)
11311124
int duplicates = 0;
1132-
for (BulkItemResponse response : bulkResponse.getItems()) {
1133-
if (response.isFailed()) {
1134-
if (response.getFailure().getCause() instanceof VersionConflictEngineException) {
1135-
// A duplicate event was created by random generator. We should not fail for this
1136-
// reason.
1137-
logger.debug("We tried to insert a duplicate: [{}]", response.getFailureMessage());
1138-
duplicates++;
1139-
} else {
1140-
fail("Failed to index data: " + bulkResponse.buildFailureMessage());
1125+
for (int i = 0; i < docCount;) {
1126+
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
1127+
bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
1128+
int max = Math.min(i + 100, docCount);
1129+
for (int j = i; j < max; j++) {
1130+
IndexRequest indexRequest = new IndexRequest(indexName).opType(DocWriteRequest.OpType.CREATE);
1131+
XContentBuilder source = sourceSupplier.get();
1132+
indexRequest.source(source);
1133+
bulkRequestBuilder.add(indexRequest);
1134+
}
1135+
i = max;
1136+
BulkResponse bulkResponse = bulkRequestBuilder.get();
1137+
for (BulkItemResponse response : bulkResponse.getItems()) {
1138+
if (response.isFailed()) {
1139+
if (response.getFailure().getCause() instanceof VersionConflictEngineException) {
1140+
// A duplicate event was created by random generator. We should not fail for this
1141+
// reason.
1142+
logger.debug("We tried to insert a duplicate: [{}]", response.getFailureMessage());
1143+
duplicates++;
1144+
} else {
1145+
fail("Failed to index data: " + bulkResponse.buildFailureMessage());
1146+
}
11411147
}
11421148
}
11431149
}

0 commit comments

Comments (0)