|
12 | 12 | import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils; |
13 | 13 | import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; |
14 | 14 | import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; |
| 15 | +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; |
| 16 | +import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; |
| 17 | +import org.elasticsearch.action.admin.indices.shrink.ResizeType; |
15 | 18 | import org.elasticsearch.action.admin.indices.stats.ShardStats; |
16 | 19 | import org.elasticsearch.action.support.ActionTestUtils; |
| 20 | +import org.elasticsearch.action.support.ActiveShardCount; |
| 21 | +import org.elasticsearch.action.support.SubscribableListener; |
17 | 22 | import org.elasticsearch.cluster.ClusterInfoService; |
18 | 23 | import org.elasticsearch.cluster.ClusterInfoServiceUtils; |
19 | 24 | import org.elasticsearch.cluster.DiskUsageIntegTestCase; |
20 | 25 | import org.elasticsearch.cluster.InternalClusterInfoService; |
| 26 | +import org.elasticsearch.cluster.metadata.IndexMetadata; |
21 | 27 | import org.elasticsearch.cluster.routing.IndexRoutingTable; |
22 | 28 | import org.elasticsearch.cluster.routing.IndexShardRoutingTable; |
23 | 29 | import org.elasticsearch.cluster.routing.ShardRouting; |
|
53 | 59 | import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; |
54 | 60 | import static org.elasticsearch.index.store.Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING; |
55 | 61 | import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; |
| 62 | +import static org.hamcrest.Matchers.allOf; |
56 | 63 | import static org.hamcrest.Matchers.contains; |
57 | 64 | import static org.hamcrest.Matchers.empty; |
58 | 65 | import static org.hamcrest.Matchers.equalTo; |
@@ -103,6 +110,62 @@ public void testHighWatermarkNotExceeded() throws Exception { |
103 | 110 |         assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, contains(in(shardSizes.getSmallestShardIds()))); |
104 | 111 |     } |
105 | 112 |
|
| 113 | +    public void testAllocateCloneIgnoresLowWatermark() throws Exception { |
| 114 | +        final var lowWatermarkBytes = randomLongBetween(WATERMARK_BYTES + 1, WATERMARK_BYTES * 5); |
| 115 | + |
| 116 | +        internalCluster().startMasterOnlyNode( |
| 117 | +            Settings.builder() |
| 118 | +                .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), lowWatermarkBytes + "b") |
| 119 | +                .build() |
| 120 | +        ); |
| 121 | +        final var dataNodeName = internalCluster().startDataOnlyNode(); |
| 122 | +        ensureStableCluster(2); |
| 123 | + |
| 124 | +        final InternalClusterInfoService clusterInfoService = getInternalClusterInfoService(); |
| 125 | +        internalCluster().getCurrentMasterNodeInstance(ClusterService.class).addListener(event -> { |
| 126 | +            ClusterInfoServiceUtils.refresh(clusterInfoService); |
| 127 | +        }); |
| 128 | + |
| 129 | +        final var sourceIndexName = "source-" + randomIdentifier(); |
| 130 | +        createIndex(sourceIndexName, indexSettings(1, 0).put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms").build()); |
| 131 | +        final var shardSizes = createReasonableSizedShards(sourceIndexName); |
| 132 | + |
| 133 | +        updateIndexSettings(Settings.builder().put("blocks.write", true), sourceIndexName); |
| 134 | + |
| 135 | +        final var totalSpace = randomLongBetween( |
| 136 | +            /* do not exceed the high watermark */ |
| 137 | +            shardSizes.getSmallestShardSize() + WATERMARK_BYTES + 1, |
| 138 | +            /* but make it so that naively duplicating the shard would exceed the low watermark, or else it's not a meaningful test */ |
| 139 | +            2 * shardSizes.getSmallestShardSize() + lowWatermarkBytes |
| 140 | +        ); |
| 141 | + |
| 142 | +        getTestFileStore(dataNodeName).setTotalSpace(totalSpace); |
| 143 | +        refreshDiskUsage(); |
| 144 | + |
| 145 | +        final var targetIndexName = "target-" + randomIdentifier(); |
| 146 | +        final var resizeRequest = new ResizeRequest(targetIndexName, sourceIndexName); |
| 147 | +        resizeRequest.setResizeType(ResizeType.CLONE); |
| 148 | +        resizeRequest.masterNodeTimeout(TEST_REQUEST_TIMEOUT); |
| 149 | +        resizeRequest.ackTimeout(TEST_REQUEST_TIMEOUT); |
| 150 | +        resizeRequest.setWaitForActiveShards(ActiveShardCount.ALL); |
| 151 | +        resizeRequest.getTargetIndexRequest() |
| 152 | +            .settings( |
| 153 | +                Settings.builder().put(resizeRequest.getTargetIndexRequest().settings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) |
| 154 | +            ); |
| 155 | + |
| 156 | +        safeAwait( |
| 157 | +            SubscribableListener.<CreateIndexResponse>newForked(l -> indicesAdmin().resizeIndex(resizeRequest, l)) |
| 158 | +                .andThenAccept( |
| 159 | +                    createIndexResponse -> assertThat( |
| 160 | +                        true, |
| 161 | +                        allOf(equalTo(createIndexResponse.isAcknowledged()), equalTo(createIndexResponse.isShardsAcknowledged())) |
| 162 | +                    ) |
| 163 | +                ) |
| 164 | +        ); |
| 165 | + |
| 166 | +        ensureGreen(sourceIndexName, targetIndexName); |
| 167 | +    } |
| 168 | + |
106 | 169 |     public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Exception { |
107 | 170 |         internalCluster().startMasterOnlyNode(); |
108 | 171 |         internalCluster().startDataOnlyNode(); |
|