
Commit 0dcd553

rename
1 parent 568ebbe commit 0dcd553

4 files changed: +73 -77 lines changed


server/src/main/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorService.java

Lines changed: 10 additions & 10 deletions
@@ -90,11 +90,11 @@ public class ShardsCapacityHealthIndicatorService implements HealthIndicatorServ
     );

     private final ClusterService clusterService;
-    private final List<ShardLimitValidator.ResultGroup> shardLimitResultGroups;
+    private final List<ShardLimitValidator.LimitGroup> shardLimitGroups;

     public ShardsCapacityHealthIndicatorService(ClusterService clusterService) {
         this.clusterService = clusterService;
-        this.shardLimitResultGroups = ShardLimitValidator.applicableResultGroups(DiscoveryNode.isStateless(clusterService.getSettings()));
+        this.shardLimitGroups = ShardLimitValidator.applicableLimitGroups(DiscoveryNode.isStateless(clusterService.getSettings()));
     }

     @Override
@@ -111,13 +111,13 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources
         }

         var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata();
-        final List<StatusResult> statusResults = shardLimitResultGroups.stream()
+        final List<StatusResult> statusResults = shardLimitGroups.stream()
             .map(
-                resultGroup -> calculateFrom(
-                    ShardLimitValidator.getShardLimitPerNode(resultGroup, shardLimitsMetadata),
+                limitGroup -> calculateFrom(
+                    ShardLimitValidator.getShardLimitPerNode(limitGroup, shardLimitsMetadata),
                     state.nodes(),
                     state.metadata(),
-                    resultGroup::checkShardLimit
+                    limitGroup::checkShardLimit
                 )
             )
             .toList();
@@ -217,17 +217,17 @@ private HealthIndicatorResult unknownIndicator() {
         );
     }

-    private static String nodeTypeFroResultGroup(ShardLimitValidator.ResultGroup resultGroup) {
-        return switch (resultGroup) {
+    private static String nodeTypeFroResultGroup(ShardLimitValidator.LimitGroup limitGroup) {
+        return switch (limitGroup) {
             case NORMAL -> "data";
             case FROZEN -> "frozen";
             case INDEX -> "index";
             case SEARCH -> "search";
         };
     }

-    private static Diagnosis diagnosisForResultGroup(ShardLimitValidator.ResultGroup resultGroup) {
-        return switch (resultGroup) {
+    private static Diagnosis diagnosisForResultGroup(ShardLimitValidator.LimitGroup limitGroup) {
+        return switch (limitGroup) {
             case NORMAL, INDEX, SEARCH -> SHARDS_MAX_CAPACITY_REACHED_DATA_NODES;
             case FROZEN -> SHARDS_MAX_CAPACITY_REACHED_FROZEN_NODES;
         };
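The rename does not change the indicator's control flow: it still resolves the applicable groups once in the constructor and maps each one to a per-group StatusResult in calculate(). The standalone Java sketch below mirrors that pattern; it is not Elasticsearch code. The StatusResult record and the healthy check are simplified stand-ins, while the enum constants and the applicableLimitGroups() split (NORMAL/FROZEN for stateful clusters, INDEX/SEARCH for stateless) are taken from the diff above.

// Standalone sketch (not Elasticsearch code) of the per-group health pattern:
// map each applicable limit group to its own status result.
import java.util.List;

public class LimitGroupSketch {

    // Hypothetical stand-in for ShardLimitValidator.LimitGroup.
    enum LimitGroup { NORMAL, FROZEN, INDEX, SEARCH }

    // Hypothetical stand-in for the indicator's per-group StatusResult.
    record StatusResult(LimitGroup group, boolean healthy) {}

    // Mirrors applicableLimitGroups(isStateless) from the diff above.
    static List<LimitGroup> applicableLimitGroups(boolean isStateless) {
        return isStateless ? List.of(LimitGroup.INDEX, LimitGroup.SEARCH)
                           : List.of(LimitGroup.NORMAL, LimitGroup.FROZEN);
    }

    // Mirrors the shape of calculate(): one StatusResult per applicable group.
    // The "healthy" condition here is a placeholder, not the real indicator logic.
    static List<StatusResult> calculate(boolean isStateless, int usedShards, int maxShardsPerNode) {
        return applicableLimitGroups(isStateless).stream()
            .map(group -> new StatusResult(group, usedShards < maxShardsPerNode))
            .toList();
    }

    public static void main(String[] args) {
        System.out.println(calculate(false, 950, 1000)); // NORMAL/FROZEN groups on a stateful cluster
        System.out.println(calculate(true, 950, 1000));  // INDEX/SEARCH groups on a stateless cluster
    }
}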

server/src/main/java/org/elasticsearch/indices/ShardLimitValidator.java

Lines changed: 25 additions & 29 deletions
@@ -93,15 +93,15 @@ private void setShardLimitPerNodeFrozen(int newValue) {
         this.shardLimitPerNodeFrozen.set(newValue);
     }

-    private int getShardLimitPerNode(ResultGroup resultGroup) {
-        return switch (resultGroup) {
+    private int getShardLimitPerNode(LimitGroup limitGroup) {
+        return switch (limitGroup) {
             case NORMAL, INDEX, SEARCH -> shardLimitPerNode.get();
             case FROZEN -> shardLimitPerNodeFrozen.get();
         };
     }

-    public static int getShardLimitPerNode(ResultGroup resultGroup, HealthMetadata.ShardLimits shardLimits) {
-        return switch (resultGroup) {
+    public static int getShardLimitPerNode(LimitGroup limitGroup, HealthMetadata.ShardLimits shardLimits) {
+        return switch (limitGroup) {
             case NORMAL, INDEX, SEARCH -> shardLimits.maxShardsPerNode();
             case FROZEN -> shardLimits.maxShardsPerNodeFrozen();
         };
@@ -116,9 +116,9 @@ public static int getShardLimitPerNode(ResultGroup resultGroup, HealthMetadata.S
      * @throws ValidationException if creating this index would put the cluster over the cluster shard limit
      */
     public void validateShardLimit(final Settings settings, final DiscoveryNodes discoveryNodes, final Metadata metadata) {
-        final var resultGroups = applicableResultGroups(isStateless);
+        final var resultGroups = applicableLimitGroups(isStateless);
         final var shardsToCreatePerGroup = resultGroups.stream()
-            .collect(Collectors.toUnmodifiableMap(Function.identity(), resultGroup -> resultGroup.newShardsTotal(settings)));
+            .collect(Collectors.toUnmodifiableMap(Function.identity(), limitGroup -> limitGroup.newShardsTotal(settings)));

         final var result = checkShardLimitOnGroups(resultGroups, shardsToCreatePerGroup, discoveryNodes, metadata);
         if (result.canAddShards == false) {
@@ -138,15 +138,15 @@ public void validateShardLimit(final Settings settings, final DiscoveryNodes dis
      * @throws ValidationException If this operation would take the cluster over the limit and enforcement is enabled.
      */
     public void validateShardLimit(DiscoveryNodes discoveryNodes, Metadata metadata, Index[] indicesToOpen) {
-        final var resultGroups = applicableResultGroups(isStateless);
-        final Map<ResultGroup, Integer> shardsToCreatePerGroup = new HashMap<>();
+        final var resultGroups = applicableLimitGroups(isStateless);
+        final Map<LimitGroup, Integer> shardsToCreatePerGroup = new HashMap<>();

         // TODO: we can short circuit when indindicesToOpenices is empty
         for (Index index : indicesToOpen) {
             IndexMetadata imd = metadata.indexMetadata(index);
             if (imd.getState().equals(IndexMetadata.State.CLOSE)) {
                 resultGroups.forEach(
-                    resultGroup -> shardsToCreatePerGroup.merge(resultGroup, resultGroup.newShardsTotal(imd.getSettings()), Integer::sum)
+                    limitGroup -> shardsToCreatePerGroup.merge(limitGroup, limitGroup.newShardsTotal(imd.getSettings()), Integer::sum)
                 );
             }
         }
@@ -160,18 +160,14 @@ public void validateShardLimit(DiscoveryNodes discoveryNodes, Metadata metadata,
     }

     public void validateShardLimitOnReplicaUpdate(DiscoveryNodes discoveryNodes, Metadata metadata, Index[] indices, int replicas) {
-        final var resultGroups = applicableResultGroups(isStateless);
-        final Map<ResultGroup, Integer> shardsToCreatePerGroup = new HashMap<>();
+        final var resultGroups = applicableLimitGroups(isStateless);
+        final Map<LimitGroup, Integer> shardsToCreatePerGroup = new HashMap<>();

         // TODO: we can short circuit when indices is empty
         for (Index index : indices) {
             IndexMetadata imd = metadata.indexMetadata(index);
             resultGroups.forEach(
-                resultGroup -> shardsToCreatePerGroup.merge(
-                    resultGroup,
-                    resultGroup.newShardsTotal(imd.getSettings(), replicas),
-                    Integer::sum
-                )
+                limitGroup -> shardsToCreatePerGroup.merge(limitGroup, limitGroup.newShardsTotal(imd.getSettings(), replicas), Integer::sum)
             );
         }

@@ -183,8 +179,8 @@ public void validateShardLimitOnReplicaUpdate(DiscoveryNodes discoveryNodes, Met
         }
     }

-    public static List<ResultGroup> applicableResultGroups(boolean isStateless) {
-        return isStateless ? List.of(ResultGroup.INDEX, ResultGroup.SEARCH) : List.of(ResultGroup.NORMAL, ResultGroup.FROZEN);
+    public static List<LimitGroup> applicableLimitGroups(boolean isStateless) {
+        return isStateless ? List.of(LimitGroup.INDEX, LimitGroup.SEARCH) : List.of(LimitGroup.NORMAL, LimitGroup.FROZEN);
     }

     /**
@@ -194,24 +190,24 @@ public static List<ResultGroup> applicableResultGroups(boolean isStateless) {
      * - If there's no room -> return the Result for _normal_ nodes (fail-fast)
      * - otherwise -> returns the Result of checking the limits for _frozen_ nodes
      *
-     * @param resultGroups The applicable result groups to check for shard limits
+     * @param limitGroups The applicable result groups to check for shard limits
      * @param shardsToCreatePerGroup The number of new shards to create per result group
      * @param discoveryNodes The nodes in the cluster
      * @param metadata The cluster state metadata
      */
     private Result checkShardLimitOnGroups(
-        List<ResultGroup> resultGroups,
-        Map<ResultGroup, Integer> shardsToCreatePerGroup,
+        List<LimitGroup> limitGroups,
+        Map<LimitGroup, Integer> shardsToCreatePerGroup,
         DiscoveryNodes discoveryNodes,
         Metadata metadata
     ) {
-        assert resultGroups.containsAll(shardsToCreatePerGroup.keySet())
-            : "result groups " + resultGroups + " do not contain groups for shards creation " + shardsToCreatePerGroup.keySet();
+        assert limitGroups.containsAll(shardsToCreatePerGroup.keySet())
+            : "result groups " + limitGroups + " do not contain groups for shards creation " + shardsToCreatePerGroup.keySet();
         // we verify the two limits independently. This also means that if they have mixed frozen and other data-roles nodes, such a mixed
         // node can have both 1000 normal and 3000 frozen shards. This is the trade-off to keep the simplicity of the counts. We advocate
         // against such mixed nodes for production use anyway.
         Result result = null;
-        for (var resultGroup : resultGroups) {
+        for (var resultGroup : limitGroups) {
             result = resultGroup.checkShardLimit(
                 getShardLimitPerNode(resultGroup),
                 shardsToCreatePerGroup.getOrDefault(resultGroup, 0),
@@ -252,15 +248,15 @@ static String errorMessageFrom(Result result) {
             + ReferenceDocs.MAX_SHARDS_PER_NODE;
     }

-    public enum ResultGroup {
+    public enum LimitGroup {
         NORMAL(NORMAL_GROUP),
         FROZEN(FROZEN_GROUP),
         INDEX("index"),
         SEARCH("search");

         private final String groupName;

-        ResultGroup(String groupName) {
+        LimitGroup(String groupName) {
             this.groupName = groupName;
         }

@@ -284,10 +280,10 @@ public int numberOfNodes(DiscoveryNodes discoveryNodes) {

         public int countShards(IndexMetadata indexMetadata) {
             return switch (this) {
-                case NORMAL -> isOpenIndex(indexMetadata) && matchesIndexSettingGroup(indexMetadata, ResultGroup.NORMAL.groupName())
+                case NORMAL -> isOpenIndex(indexMetadata) && matchesIndexSettingGroup(indexMetadata, LimitGroup.NORMAL.groupName())
                     ? indexMetadata.getTotalNumberOfShards()
                     : 0;
-                case FROZEN -> isOpenIndex(indexMetadata) && matchesIndexSettingGroup(indexMetadata, ResultGroup.FROZEN.groupName())
+                case FROZEN -> isOpenIndex(indexMetadata) && matchesIndexSettingGroup(indexMetadata, LimitGroup.FROZEN.groupName())
                     ? indexMetadata.getTotalNumberOfShards()
                     : 0;
                 case INDEX -> isOpenIndex(indexMetadata) ? indexMetadata.getNumberOfShards() : 0;
@@ -409,7 +405,7 @@ public record Result(
         Optional<Long> currentUsedShards,
         int totalShardsToAdd,
         int maxShardsInCluster,
-        ResultGroup group
+        LimitGroup group
     ) {}

 }
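For reference, the validator's per-group accounting works the same way before and after the rename: each validateShardLimit* variant accumulates the new shards per LimitGroup with Map.merge, then compares every group's total (current plus new) against its per-node limit times the number of nodes in that group. The sketch below is a standalone illustration of that accounting, not the real implementation; the shard counts, node count, and the 1000-shards-per-node limit are hypothetical inputs, and only the merge-then-compare shape comes from the diff.

// Standalone sketch (not Elasticsearch code) of the per-group shard accounting
// used by the validateShardLimit* methods.
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ShardLimitSketch {

    enum LimitGroup { NORMAL, FROZEN, INDEX, SEARCH } // mirrors the renamed enum

    public static void main(String[] args) {
        // Hypothetical inputs: shards each new index would add, per group.
        List<Map.Entry<LimitGroup, Integer>> newIndices = List.of(
            Map.entry(LimitGroup.NORMAL, 6),   // e.g. 3 primaries * (1 primary + 1 replica)
            Map.entry(LimitGroup.NORMAL, 4),
            Map.entry(LimitGroup.FROZEN, 2)
        );

        // Same merge(...) accumulation pattern as in the validator.
        Map<LimitGroup, Integer> shardsToCreatePerGroup = new HashMap<>();
        for (var entry : newIndices) {
            shardsToCreatePerGroup.merge(entry.getKey(), entry.getValue(), Integer::sum);
        }

        // Hypothetical cluster figures; the real limits come from cluster settings.
        int maxShardsPerNode = 1000;
        int normalNodes = 3;
        int currentNormalShards = 2990;

        int wanted = currentNormalShards + shardsToCreatePerGroup.getOrDefault(LimitGroup.NORMAL, 0);
        boolean canAddShards = wanted <= maxShardsPerNode * normalNodes;
        System.out.println("NORMAL group: " + wanted + "/" + (maxShardsPerNode * normalNodes)
            + " -> canAddShards=" + canAddShards);
    }
}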

server/src/test/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorServiceTests.java

Lines changed: 1 addition & 1 deletion
@@ -351,7 +351,7 @@ public void testCalculateMethods() {
             Optional.empty(),
             randomInt(),
             randomInt(),
-            randomFrom(ShardLimitValidator.ResultGroup.values())
+            randomFrom(ShardLimitValidator.LimitGroup.values())
         );
     };
