Skip to content

Commit 328ac82

Browse files
do not force merge when indexing
1 parent 12119c5 commit 328ac82

File tree

2 files changed

+46
-10
lines changed

2 files changed

+46
-10
lines changed

server/src/internalClusterTest/java/org/elasticsearch/index/engine/MergeWithLowDiskSpaceIT.java

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
1515
import org.elasticsearch.cluster.DiskUsageIntegTestCase;
1616
import org.elasticsearch.cluster.metadata.IndexMetadata;
17+
import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings;
1718
import org.elasticsearch.common.settings.Settings;
1819
import org.elasticsearch.common.util.concurrent.EsExecutors;
1920
import org.elasticsearch.index.IndexNotFoundException;
@@ -53,6 +54,10 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
5354
// merges pile up more easily when there's only a few threads executing them
5455
.put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), randomIntBetween(1, 2))
5556
.put(ThreadPoolMergeExecutorService.INDICES_MERGE_DISK_HIGH_WATERMARK_SETTING.getKey(), MERGE_DISK_HIGH_WATERMARK_BYTES + "b")
57+
// let's not worry about allocation watermarks (e.g. read-only shards) in this test suite
58+
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "0b")
59+
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "0b")
60+
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "0b")
5661
.build();
5762
}
5863

@@ -72,6 +77,7 @@ public void testShardCloseWhenDiskSpaceInsufficient() throws Exception {
7277
false,
7378
false,
7479
false,
80+
false,
7581
IntStream.range(1, randomIntBetween(2, 10))
7682
.mapToObj(i -> prepareIndex(indexName).setSource("field", randomAlphaOfLength(50)))
7783
.toList()
@@ -113,6 +119,7 @@ public void testShardCloseWhenDiskSpaceInsufficient() throws Exception {
113119
false,
114120
false,
115121
false,
122+
false,
116123
IntStream.range(1, randomIntBetween(2, 10))
117124
.mapToObj(i -> prepareIndex(indexName).setSource("another_field", randomAlphaOfLength(50)))
118125
.toList()

test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java

Lines changed: 39 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1917,7 +1917,7 @@ public void indexRandom(boolean forceRefresh, List<IndexRequestBuilder> builders
19171917
* @param builders the documents to index.
19181918
*/
19191919
public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List<IndexRequestBuilder> builders) {
1920-
indexRandom(forceRefresh, dummyDocuments, true, builders);
1920+
indexRandom(forceRefresh, dummyDocuments, true, true, builders);
19211921
}
19221922

19231923
/**
@@ -1927,13 +1927,37 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List<Index
19271927
* segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index
19281928
* layout.
19291929
*
1930-
* @param forceRefresh if {@code true} all involved indices are refreshed once the documents are indexed.
1931-
* @param dummyDocuments if {@code true} some empty dummy documents may be randomly inserted into the document list and deleted once
1932-
* all documents are indexed. This is useful to produce deleted documents on the server side.
1933-
* @param maybeFlush if {@code true} this method may randomly execute full flushes after index operations.
1934-
* @param builders the documents to index.
1930+
* @param forceRefresh if {@code true} all involved indices are refreshed once the documents are indexed.
1931+
* @param dummyDocuments if {@code true} some empty dummy documents may be randomly inserted into the document list and deleted once
1932+
* all documents are indexed. This is useful to produce deleted documents on the server side.
1933+
* @param maybeFlush if {@code true} this method may randomly execute full flushes after index operations.
1934+
* @param builders the documents to index.
19351935
*/
19361936
public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush, List<IndexRequestBuilder> builders) {
1937+
indexRandom(forceRefresh, dummyDocuments, maybeFlush, true, builders);
1938+
}
1939+
1940+
/**
1941+
* Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
1942+
* indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document
1943+
* ids or index segment creations. Some features might have bug when a given document is the first or the last in a
1944+
* segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index
1945+
* layout.
1946+
*
1947+
* @param forceRefresh if {@code true} all involved indices are refreshed once the documents are indexed.
1948+
* @param dummyDocuments if {@code true} some empty dummy documents may be randomly inserted into the document list and deleted once
1949+
* all documents are indexed. This is useful to produce deleted documents on the server side.
1950+
* @param maybeFlush if {@code true} this method may randomly execute full flushes after index operations.
1951+
* @param maybeForceMerge if {@code true} this method may randomly execute force merges after index operations.
1952+
* @param builders the documents to index.
1953+
*/
1954+
public void indexRandom(
1955+
boolean forceRefresh,
1956+
boolean dummyDocuments,
1957+
boolean maybeFlush,
1958+
boolean maybeForceMerge,
1959+
List<IndexRequestBuilder> builders
1960+
) {
19371961
Random random = random();
19381962
Set<String> indices = new HashSet<>();
19391963
builders = new ArrayList<>(builders);
@@ -1966,13 +1990,13 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma
19661990
new LatchedActionListener<DocWriteResponse>(ActionListener.noop(), newLatch(inFlightAsyncOperations))
19671991
.delegateResponse((l, e) -> fail(e))
19681992
);
1969-
postIndexAsyncActions(indicesArray, inFlightAsyncOperations, maybeFlush);
1993+
postIndexAsyncActions(indicesArray, inFlightAsyncOperations, maybeFlush, maybeForceMerge);
19701994
}
19711995
} else {
19721996
logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, false);
19731997
for (IndexRequestBuilder indexRequestBuilder : builders) {
19741998
indexRequestBuilder.get();
1975-
postIndexAsyncActions(indicesArray, inFlightAsyncOperations, maybeFlush);
1999+
postIndexAsyncActions(indicesArray, inFlightAsyncOperations, maybeFlush, maybeForceMerge);
19762000
}
19772001
}
19782002
} else {
@@ -2051,7 +2075,12 @@ private static CountDownLatch newLatch(List<CountDownLatch> latches) {
20512075
/**
20522076
* Maybe refresh, force merge, or flush then always make sure there aren't too many in flight async operations.
20532077
*/
2054-
private void postIndexAsyncActions(String[] indices, List<CountDownLatch> inFlightAsyncOperations, boolean maybeFlush) {
2078+
private void postIndexAsyncActions(
2079+
String[] indices,
2080+
List<CountDownLatch> inFlightAsyncOperations,
2081+
boolean maybeFlush,
2082+
boolean maybeForceMerge
2083+
) {
20552084
if (rarely()) {
20562085
if (rarely()) {
20572086
indicesAdmin().prepareRefresh(indices)
@@ -2061,7 +2090,7 @@ private void postIndexAsyncActions(String[] indices, List<CountDownLatch> inFlig
20612090
indicesAdmin().prepareFlush(indices)
20622091
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
20632092
.execute(new LatchedActionListener<>(ActionListener.noop(), newLatch(inFlightAsyncOperations)));
2064-
} else if (rarely()) {
2093+
} else if (maybeForceMerge && rarely()) {
20652094
indicesAdmin().prepareForceMerge(indices)
20662095
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
20672096
.setMaxNumSegments(between(1, 10))

0 commit comments

Comments (0)