Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions docs/changelog/131950.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 131950
summary: Fix encoding of non-ascii field names in ignored source
area: Mapping
type: bug
issues: []
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ static byte[] encode(NameValue values) {

byte[] nameBytes = values.name.getBytes(StandardCharsets.UTF_8);
byte[] bytes = new byte[4 + nameBytes.length + values.value.length];
ByteUtils.writeIntLE(values.name.length() + PARENT_OFFSET_IN_NAME_OFFSET * values.parentOffset, bytes, 0);
ByteUtils.writeIntLE(nameBytes.length + PARENT_OFFSET_IN_NAME_OFFSET * values.parentOffset, bytes, 0);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just double checking: there is no need for an index version check here, given that decode isn't updated in this change. In other words, indexing new documents into indices with an older index version would not trigger the error described in the PR description.

System.arraycopy(nameBytes, 0, bytes, 4, nameBytes.length);
System.arraycopy(values.value.bytes, values.value.offset, bytes, 4 + nameBytes.length, values.value.length);
return bytes;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,14 @@
package org.elasticsearch.index.mapper;

import org.apache.lucene.index.DirectoryReader;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.CheckedConsumer;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.search.lookup.SourceFilter;
import org.elasticsearch.test.FieldMaskingReader;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.json.JsonXContent;
import org.hamcrest.Matchers;
import org.junit.Before;

Expand Down Expand Up @@ -123,6 +125,15 @@ public void testIgnoredString() throws IOException {
);
}

public void testIgnoredStringFullUnicode() throws IOException {
// Regression test for ignored-source encoding: field names and values made of
// arbitrary (possibly multi-byte) unicode code points must round-trip intact.
String unicodeValue = randomUnicodeOfCodepointLengthBetween(5, 20);
String unicodeFieldName = randomUnicodeOfCodepointLength(5);

// Build the reference JSON directly, then check synthetic source matches it.
XContentBuilder reference = JsonXContent.contentBuilder().startObject().field(unicodeFieldName, unicodeValue).endObject();
String expectedJson = Strings.toString(reference);

String actualJson = getSyntheticSourceWithFieldLimit(b -> b.field(unicodeFieldName, unicodeValue));
assertEquals(expectedJson, actualJson);
}

public void testIgnoredInt() throws IOException {
int value = randomInt();
assertEquals("{\"my_value\":" + value + "}", getSyntheticSourceWithFieldLimit(b -> b.field("my_value", value)));
Expand Down
Loading