Skip to content

Commit fd80fda

Browse files
HarishNarasimhanK (Harish Narasimhan)
authored and committed
Merge branch 'main' into main
Signed-off-by: 𝐇𝐚𝐫𝐢𝐬𝐡 𝐍𝐚𝐫𝐚𝐬𝐢𝐦𝐡𝐚𝐧 𝐊 <163456392+HarishNarasimhanK@users.noreply.github.com>
2 parents c89a57d + d0dddd9 commit fd80fda

File tree

9 files changed

+341
-23
lines changed

9 files changed

+341
-23
lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
5959
- Fix terms lookup subquery fetch limit reading from non-existent index setting instead of cluster `max_clause_count` ([#20823](https://github.com/opensearch-project/OpenSearch/pull/20823))
6060
- Fix array_index_out_of_bounds_exception with wildcard and aggregations ([#20842](https://github.com/opensearch-project/OpenSearch/pull/20842))
6161
- Handle dependencies between analyzers ([#19248](https://github.com/opensearch-project/OpenSearch/pull/19248))
62+
- Fix `_field_caps` returning empty results and corrupted field names for `disable_objects: true` mappings ([#20800](https://github.com/opensearch-project/OpenSearch/pull/20800))
6263
- Reduce ClusterState retention in retry closures of TransportClusterManagerNodeAction ([#20858](https://github.com/opensearch-project/OpenSearch/pull/20858))
6364

6465
### Dependencies

gradle/run.gradle

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,9 @@ testClusters {
6060
for (String p : installedPlugins) {
6161
// check if its a local plugin first
6262
if (project.findProject(':plugins:' + p) != null) {
63-
plugin('plugins:' + p)
63+
plugin(':plugins:' + p)
64+
} else if (project.findProject(':sandbox:plugins:' + p) != null) {
65+
plugin(':sandbox:plugins:' + p)
6466
} else {
6567
// attempt to fetch it from maven
6668
project.repositories.mavenLocal()
Lines changed: 139 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,139 @@
1+
---
2+
"Field caps with disable_objects returns leaf fields":
3+
4+
- skip:
5+
version: " - 3.4.99"
6+
reason: "disable_objects was introduced in 3.5.0"
7+
8+
- do:
9+
indices.create:
10+
index: test_disable_objects
11+
body:
12+
mappings:
13+
properties:
14+
attributes:
15+
type: object
16+
disable_objects: true
17+
18+
- do:
19+
index:
20+
index: test_disable_objects
21+
id: "1"
22+
body:
23+
attributes:
24+
foo:
25+
bar: baz
26+
27+
- do:
28+
indices.refresh:
29+
index: test_disable_objects
30+
31+
- do:
32+
field_caps:
33+
index: test_disable_objects
34+
fields: "*"
35+
36+
# Leaf field must exist
37+
- is_true: fields.attributes\.foo\.bar
38+
39+
# Parent object field must have type object
40+
- match: { fields.attributes.object.type: object }
41+
42+
# Intermediate path must not exist (since disable_objects is set)
43+
- is_false: fields.attributes\.foo
44+
45+
---
46+
"Field caps with disable_objects does not corrupt field names after second document indexed":
47+
48+
- skip:
49+
version: " - 3.4.99"
50+
reason: "disable_objects was introduced in 3.5.0"
51+
52+
- do:
53+
indices.create:
54+
index: test_disable_objects_merge
55+
body:
56+
mappings:
57+
properties:
58+
attributes:
59+
type: object
60+
disable_objects: true
61+
62+
# doc 1: creates the disable_objects leaf field
63+
- do:
64+
index:
65+
index: test_disable_objects_merge
66+
id: "1"
67+
body:
68+
attributes:
69+
foo:
70+
bar: baz
71+
72+
# doc 2: unrelated - must not corrupt the existing mapping
73+
- do:
74+
index:
75+
index: test_disable_objects_merge
76+
id: "2"
77+
body:
78+
key: 1
79+
80+
- do:
81+
indices.refresh:
82+
index: test_disable_objects_merge
83+
84+
- do:
85+
field_caps:
86+
index: test_disable_objects_merge
87+
fields: "*"
88+
89+
# Leaf field must exist
90+
- is_true: fields.attributes\.foo\.bar
91+
92+
# Corrupted field name must not exist
93+
- is_false: fields.attributes\.foo\.foo\.bar
94+
95+
# The unrelated field must also exist
96+
- is_true: fields.key
97+
98+
---
99+
"Field caps with normal nested object returns intermediate paths":
100+
101+
- do:
102+
indices.create:
103+
index: test_normal_nested
104+
body:
105+
mappings:
106+
properties:
107+
user:
108+
type: object
109+
properties:
110+
address:
111+
type: object
112+
properties:
113+
city:
114+
type: keyword
115+
116+
- do:
117+
index:
118+
index: test_normal_nested
119+
id: "1"
120+
body:
121+
user:
122+
address:
123+
city: Chemnitz
124+
125+
- do:
126+
indices.refresh:
127+
index: test_normal_nested
128+
129+
- do:
130+
field_caps:
131+
index: test_normal_nested
132+
fields: "*"
133+
134+
# Leaf field must exist
135+
- is_true: fields.user\.address\.city
136+
137+
# Intermediate object paths must also exist
138+
- match: { fields.user\.address.object.type: object }
139+
- match: { fields.user.object.type: object }

server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java

Lines changed: 17 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@
3434
import java.util.List;
3535
import java.util.Locale;
3636
import java.util.concurrent.TimeUnit;
37+
import java.util.concurrent.atomic.AtomicReference;
3738
import java.util.stream.Collectors;
3839

3940
import static org.opensearch.index.remote.RemoteStorePressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT;
@@ -44,28 +45,23 @@ public class RemoteStoreBackpressureAndResiliencyIT extends AbstractRemoteStoreM
4445
public void testWritesRejectedDueToConsecutiveFailureBreach() throws Exception {
4546
// Here the doc size of the request remains same throughout the test. After initial indexing, all remote store interactions
4647
// fail leading to consecutive failure limit getting exceeded and leading to rejections.
47-
validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 10, ByteSizeUnit.KB.toIntBytes(1), 15, "failure_streak_count");
48+
validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 10, ByteSizeUnit.KB.toIntBytes(1), "failure_streak_count");
4849
}
4950

5051
public void testWritesRejectedDueToBytesLagBreach() throws Exception {
5152
// Initially indexing happens with doc size of 2 bytes, then all remote store interactions start failing. Now, the
5253
// indexing happens with doc size of 1KB leading to bytes lag limit getting exceeded and leading to rejections.
53-
validateBackpressure(ByteSizeUnit.BYTES.toIntBytes(2), 30, ByteSizeUnit.KB.toIntBytes(1), 15, "bytes_lag");
54+
validateBackpressure(ByteSizeUnit.BYTES.toIntBytes(2), 30, ByteSizeUnit.KB.toIntBytes(1), "bytes_lag");
5455
}
5556

5657
public void testWritesRejectedDueToTimeLagBreach() throws Exception {
5758
// Initially indexing happens with doc size of 1KB, then all remote store interactions start failing. Now, the
5859
// indexing happens with doc size of 1 byte leading to time lag limit getting exceeded and leading to rejections.
59-
validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 3, "time_lag");
60+
validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), "time_lag");
6061
}
6162

62-
private void validateBackpressure(
63-
int initialDocSize,
64-
int initialDocsToIndex,
65-
int onFailureDocSize,
66-
int onFailureDocsToIndex,
67-
String breachMode
68-
) throws Exception {
63+
private void validateBackpressure(int initialDocSize, int initialDocsToIndex, int onFailureDocSize, String breachMode)
64+
throws Exception {
6965
Path location = randomRepoPath().toAbsolutePath();
7066
String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE);
7167

@@ -92,10 +88,17 @@ private void validateBackpressure(
9288

9389
jsonString = generateString(onFailureDocSize);
9490
BytesReference onFailureSource = new BytesArray(jsonString);
95-
OpenSearchRejectedExecutionException ex = assertThrows(
96-
OpenSearchRejectedExecutionException.class,
97-
() -> indexDocAndRefresh(onFailureSource, onFailureDocsToIndex)
98-
);
91+
AtomicReference<OpenSearchRejectedExecutionException> exRef = new AtomicReference<>();
92+
assertBusy(() -> {
93+
try {
94+
indexDocAndRefresh(onFailureSource, 1);
95+
} catch (OpenSearchRejectedExecutionException e) {
96+
exRef.set(e);
97+
return;
98+
}
99+
fail("Expected OpenSearchRejectedExecutionException to be thrown");
100+
}, 30, TimeUnit.SECONDS);
101+
OpenSearchRejectedExecutionException ex = exRef.get();
99102
assertTrue(ex.getMessage().contains("rejected execution on primary shard"));
100103
assertTrue(ex.getMessage().contains(breachMode));
101104

server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,7 @@ public final void cleanupNodeSettings() {
112112
);
113113
}
114114

115+
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17154")
115116
public void testHighCPUInEnforcedMode() throws InterruptedException {
116117
Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build();
117118
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get());
@@ -129,6 +130,7 @@ public void testHighCPUInEnforcedMode() throws InterruptedException {
129130
updateWorkloadGroupInClusterState(DELETE, workloadGroup);
130131
}
131132

133+
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17154")
132134
public void testHighCPUInMonitorMode() throws InterruptedException {
133135
WorkloadGroup workloadGroup = new WorkloadGroup(
134136
"name",
@@ -143,6 +145,7 @@ public void testHighCPUInMonitorMode() throws InterruptedException {
143145
updateWorkloadGroupInClusterState(DELETE, workloadGroup);
144146
}
145147

148+
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17154")
146149
public void testHighMemoryInEnforcedMode() throws InterruptedException {
147150
Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build();
148151
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get());
@@ -157,6 +160,7 @@ public void testHighMemoryInEnforcedMode() throws InterruptedException {
157160
updateWorkloadGroupInClusterState(DELETE, workloadGroup);
158161
}
159162

163+
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17154")
160164
public void testHighMemoryInMonitorMode() throws InterruptedException {
161165
WorkloadGroup workloadGroup = new WorkloadGroup(
162166
"name",

server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -180,6 +180,12 @@ private FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesInd
180180
if (mapperService.fieldType(parentField) == null) {
181181
// no field type, it must be an object field
182182
ObjectMapper mapper = mapperService.getObjectMapper(parentField);
183+
if (mapper == null) {
184+
// parentField is part of a literal dotted field name under a disable_objects=true parent
185+
// No ObjectMapper exists for this intermediate path so skip it and continue up the chain
186+
dotIndex = parentField.lastIndexOf('.');
187+
continue;
188+
}
183189
String type = mapper.nested().isNested() ? "nested" : "object";
184190
IndexFieldCapabilities fieldCap = new IndexFieldCapabilities(
185191
parentField,

server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -127,12 +127,14 @@ public ParametrizedFieldMapper merge(Mapper mergeWith) {
127127
Conflicts conflicts = new Conflicts(name());
128128
builder.merge((FieldMapper) mergeWith, conflicts);
129129
conflicts.check();
130-
return builder.build(new BuilderContext(Settings.EMPTY, parentPath(name())));
130+
return builder.build(new BuilderContext(Settings.EMPTY, parentPath(name(), simpleName())));
131131
}
132132

133-
private static ContentPath parentPath(String name) {
134-
int endPos = name.lastIndexOf(".");
135-
if (endPos == -1) {
133+
private static ContentPath parentPath(String name, String simpleName) {
134+
// Use simpleName to compute the parent path so that fields whose simpleName contains dots
135+
// (because of disable_objects) get the correct parent path
136+
int endPos = name.length() - simpleName.length() - 1;
137+
if (endPos < 0) {
136138
return new ContentPath(0);
137139
}
138140
return new ContentPath(name.substring(0, endPos));
@@ -619,7 +621,7 @@ private void merge(FieldMapper in, Conflicts conflicts) {
619621
param.merge(in, conflicts);
620622
}
621623
for (Mapper newSubField : in.multiFields) {
622-
multiFieldsBuilder.update(newSubField, parentPath(newSubField.name()));
624+
multiFieldsBuilder.update(newSubField, parentPath(newSubField.name(), newSubField.simpleName()));
623625
}
624626
this.copyTo.reset(in.copyTo);
625627
validate();

server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -243,13 +243,18 @@ public CompletableFuture<IndexInput> asyncLoadIndexInput(Executor executor) {
243243
return createIndexInput(fileCache, streamReader, request);
244244
} catch (Exception e) {
245245
fileCache.remove(request.getFilePath());
246-
throw new CompletionException(e);
246+
throw (e instanceof RuntimeException) ? (RuntimeException) e : new CompletionException(e);
247247
}
248248
}, executor).handle((indexInput, throwable) -> {
249-
fileCache.decRef(request.getFilePath());
250249
if (throwable != null) {
251-
result.completeExceptionally(throwable);
250+
// On failure, the entry was already removed from the cache in the
251+
// catch block above, so we must not decRef here.
252+
Throwable cause = (throwable instanceof CompletionException && throwable.getCause() != null)
253+
? throwable.getCause()
254+
: throwable;
255+
result.completeExceptionally(cause);
252256
} else {
257+
fileCache.decRef(request.getFilePath());
253258
result.complete(indexInput);
254259
}
255260
return null;

0 commit comments

Comments (0)