From 3dfc0e17b207cb5598011863c995738b7a5edb06 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Thu, 28 Aug 2025 16:20:08 -0600
Subject: [PATCH 01/20] Add SourceContext
---
server/src/main/java/module-info.java | 1 +
.../action/index/SourceContext.java | 69 +++++++++++++++++++
2 files changed, 70 insertions(+)
create mode 100644 server/src/main/java/org/elasticsearch/action/index/SourceContext.java
diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java
index 549c603b13980..473ba566feec7 100644
--- a/server/src/main/java/module-info.java
+++ b/server/src/main/java/module-info.java
@@ -54,6 +54,7 @@
requires org.apache.lucene.queryparser;
requires org.apache.lucene.sandbox;
requires org.apache.lucene.suggest;
+ requires org.elasticsearch.server;
exports org.elasticsearch;
exports org.elasticsearch.action;
diff --git a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
new file mode 100644
index 0000000000000..86edea6f22e3d
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.action.index;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.core.Releasable;
+import org.elasticsearch.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Map;
+
+public class SourceContext implements Writeable {
+
+ private XContentType contentType;
+ private BytesReference source;
+ private Releasable sourceReleasable;
+
+ public SourceContext(XContentType contentType, BytesReference source, Releasable sourceReleasable) {
+ this.contentType = contentType;
+ this.source = source;
+ this.sourceReleasable = sourceReleasable;
+ }
+
+ public SourceContext(StreamInput in) throws IOException {
+ if (in.readBoolean()) {
+ // faster than StreamInput::readEnum, do not replace we read a lot of these instances at times
+ contentType = XContentType.ofOrdinal(in.readByte());
+ } else {
+ contentType = null;
+ }
+ source = in.readBytesReference();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (contentType != null) {
+ out.writeBoolean(true);
+ XContentHelper.writeTo(out, contentType);
+ } else {
+ out.writeBoolean(false);
+ }
+ out.writeBytesReference(source);
+ }
+
+ public XContentType getContentType() {
+ return contentType;
+ }
+
+ public BytesReference getSource() {
+ return source;
+ }
+
+ public Map<String, Object> sourceAsMap() {
+ return XContentHelper.convertToMap(source, false, contentType).v2();
+ }
+
+
+}
From c94f0c01806a5858112dc322def998cae436fc64 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Tue, 2 Sep 2025 14:49:36 -0600
Subject: [PATCH 02/20] Move IndexRequest source handling into SourceContext
---
server/src/main/java/module-info.java | 1 -
.../org/elasticsearch/TransportVersions.java | 1 +
.../action/index/IndexRequest.java | 150 +++++++---------
.../action/index/IndexRequestBuilder.java | 2 +-
.../action/index/SourceContext.java | 166 +++++++++++++++++-
.../action/update/UpdateHelper.java | 7 +-
.../action/update/UpdateRequestBuilder.java | 5 +-
.../elasticsearch/ingest/IngestService.java | 3 +-
.../ShardBulkInferenceActionFilterTests.java | 23 +--
9 files changed, 245 insertions(+), 113 deletions(-)
diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java
index 473ba566feec7..549c603b13980 100644
--- a/server/src/main/java/module-info.java
+++ b/server/src/main/java/module-info.java
@@ -54,7 +54,6 @@
requires org.apache.lucene.queryparser;
requires org.apache.lucene.sandbox;
requires org.apache.lucene.suggest;
- requires org.elasticsearch.server;
exports org.elasticsearch;
exports org.elasticsearch.action;
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index 81e4cca69769c..9a3b45a01a427 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -369,6 +369,7 @@ static TransportVersion def(int id) {
public static final TransportVersion SCRIPT_RESCORER = def(9_143_0_00);
public static final TransportVersion ESQL_LOOKUP_OPERATOR_EMITTED_ROWS = def(9_144_0_00);
public static final TransportVersion ALLOCATION_DECISION_NOT_PREFERRED = def(9_145_0_00);
+ public static final TransportVersion SOURCE_CONTEXT = def(9_146_0_00);
/*
* STOP! READ THIS FIRST! No, really,
diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
index 707ea0919a91f..6a971ec9c430c 100644
--- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
@@ -25,7 +25,6 @@
import org.elasticsearch.cluster.metadata.ProjectMetadata;
import org.elasticsearch.cluster.routing.IndexRouting;
import org.elasticsearch.common.UUIDs;
-import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -39,9 +38,7 @@
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.ingest.IngestService;
-import org.elasticsearch.plugins.internal.XContentParserDecorator;
import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentFactory;
import org.elasticsearch.xcontent.XContentType;
import java.io.IOException;
@@ -98,15 +95,13 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implements DocWriteRequest<IndexRequest>, CompositeIndicesRequest {
@Nullable
private String routing;
- private BytesReference source;
+ private SourceContext sourceContext = new SourceContext();
private OpType opType = OpType.INDEX;
private long version = Versions.MATCH_ANY;
private VersionType versionType = VersionType.INTERNAL;
- private XContentType contentType;
-
private String pipeline;
private String finalPipeline;
@@ -165,7 +160,14 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOException {
}
id = in.readOptionalString();
routing = in.readOptionalString();
- source = in.readBytesReference();
+ boolean beforeSourceContext = in.getTransportVersion().before(TransportVersions.SOURCE_CONTEXT);
+ BytesReference source;
+ if (beforeSourceContext) {
+ source = in.readBytesReference();
+ } else {
+ sourceContext = new SourceContext(in);
+ source = null;
+ }
opType = OpType.fromId(in.readByte());
version = in.readLong();
versionType = VersionType.fromValue(in.readByte());
@@ -174,11 +176,15 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOException {
isPipelineResolved = in.readBoolean();
isRetry = in.readBoolean();
autoGeneratedTimestamp = in.readLong();
- if (in.readBoolean()) {
- // faster than StreamInput::readEnum, do not replace we read a lot of these instances at times
- contentType = XContentType.ofOrdinal(in.readByte());
- } else {
- contentType = null;
+ if (beforeSourceContext) {
+ XContentType contentType;
+ if (in.readBoolean()) {
+ // faster than StreamInput::readEnum, do not replace we read a lot of these instances at times
+ contentType = XContentType.ofOrdinal(in.readByte());
+ } else {
+ contentType = null;
+ }
+ sourceContext = new SourceContext(contentType, source, () -> {});
}
ifSeqNo = in.readZLong();
ifPrimaryTerm = in.readVLong();
@@ -250,10 +256,10 @@ private static String readPipelineName(StreamInput in) throws IOException {
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
- if (source == null) {
+ if (sourceContext.hasSource() == false) {
validationException = addValidationError("source is missing", validationException);
}
- if (contentType == null) {
+ if (sourceContext.contentType() == null) {
validationException = addValidationError("content type is missing", validationException);
}
assert opType == OpType.INDEX || opType == OpType.CREATE : "unexpected op-type: " + opType;
@@ -311,7 +317,7 @@ public ActionRequestValidationException validate() {
* source at index time
*/
public XContentType getContentType() {
- return contentType;
+ return sourceContext.contentType();
}
/**
@@ -412,15 +418,11 @@ public boolean isPipelineResolved() {
* The source of the document to index, recopied to a new array if it is unsafe.
*/
public BytesReference source() {
- return source;
+ return sourceContext.bytes();
}
public Map<String, Object> sourceAsMap() {
- return XContentHelper.convertToMap(source, false, contentType).v2();
- }
-
- public Map<String, Object> sourceAsMap(XContentParserDecorator parserDecorator) {
- return XContentHelper.convertToMap(source, false, contentType, parserDecorator).v2();
+ return sourceContext.sourceAsMap();
}
/**
@@ -429,7 +431,8 @@ public Map<String, Object> sourceAsMap(XContentParserDecorator parserDecorator) {
* @param source The map to index
*/
public IndexRequest source(Map<String, Object> source) throws ElasticsearchGenerationException {
- return source(source, Requests.INDEX_CONTENT_TYPE);
+ sourceContext.source(source);
+ return this;
}
/**
@@ -438,24 +441,14 @@ public IndexRequest source(Map<String, Object> source) throws ElasticsearchGenerationException {
* @param source The map to index
*/
public IndexRequest source(Map<String, Object> source, XContentType contentType) throws ElasticsearchGenerationException {
- try {
- XContentBuilder builder = XContentFactory.contentBuilder(contentType);
- builder.map(source);
- return source(builder);
- } catch (IOException e) {
- throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
- }
+ sourceContext.source(source, contentType);
+ return this;
}
public IndexRequest source(Map<String, Object> source, XContentType contentType, boolean ensureNoSelfReferences)
throws ElasticsearchGenerationException {
- try {
- XContentBuilder builder = XContentFactory.contentBuilder(contentType);
- builder.map(source, ensureNoSelfReferences);
- return source(builder);
- } catch (IOException e) {
- throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
- }
+ sourceContext.source(source, contentType, ensureNoSelfReferences);
+ return this;
}
/**
@@ -465,14 +458,16 @@ public IndexRequest source(Map<String, Object> source, XContentType contentType, boolean ensureNoSelfReferences)
* or using the {@link #source(byte[], XContentType)}.
*/
public IndexRequest source(String source, XContentType xContentType) {
- return source(new BytesArray(source), xContentType);
+ sourceContext.source(source, xContentType);
+ return this;
}
/**
* Sets the content source to index.
*/
public IndexRequest source(XContentBuilder sourceBuilder) {
- return source(BytesReference.bytes(sourceBuilder), sourceBuilder.contentType());
+ sourceContext.source(sourceBuilder);
+ return this;
}
/**
@@ -484,7 +479,8 @@ public IndexRequest source(XContentBuilder sourceBuilder) {
*
*/
public IndexRequest source(Object... source) {
- return source(Requests.INDEX_CONTENT_TYPE, source);
+ sourceContext.source(source);
+ return this;
}
/**
@@ -496,49 +492,20 @@ public IndexRequest source(Object... source) {
*
*/
public IndexRequest source(XContentType xContentType, Object... source) {
- return source(getXContentBuilder(xContentType, source));
+ sourceContext.source(xContentType, source);
+ return this;
}
- /**
- * Returns an XContentBuilder for the given xContentType and source array
- *
- * Note: the number of objects passed to this method as varargs must be an even
- * number. Also the first argument in each pair (the field name) must have a
- * valid String representation.
- *
- */
- public static XContentBuilder getXContentBuilder(XContentType xContentType, Object... source) {
- if (source.length % 2 != 0) {
- throw new IllegalArgumentException("The number of object passed must be even but was [" + source.length + "]");
- }
- if (source.length == 2 && source[0] instanceof BytesReference && source[1] instanceof Boolean) {
- throw new IllegalArgumentException(
- "you are using the removed method for source with bytes and unsafe flag, the unsafe flag"
- + " was removed, please just use source(BytesReference)"
- );
- }
- try {
- XContentBuilder builder = XContentFactory.contentBuilder(xContentType);
- builder.startObject();
- // This for loop increments by 2 because the source array contains adjacent key/value pairs:
- for (int i = 0; i < source.length; i = i + 2) {
- String field = source[i].toString();
- Object value = source[i + 1];
- builder.field(field, value);
- }
- builder.endObject();
- return builder;
- } catch (IOException e) {
- throw new ElasticsearchGenerationException("Failed to generate", e);
- }
+ public IndexRequest sourceContext(SourceContext sourceContext) {
+ this.sourceContext = Objects.requireNonNull(sourceContext);
+ return this;
}
/**
* Sets the document to index in bytes form.
*/
public IndexRequest source(BytesReference source, XContentType xContentType) {
- this.source = Objects.requireNonNull(source);
- this.contentType = Objects.requireNonNull(xContentType);
+ sourceContext.source(Objects.requireNonNull(source), Objects.requireNonNull(xContentType));
return this;
}
@@ -546,7 +513,8 @@ public IndexRequest source(BytesReference source, XContentType xContentType) {
* Sets the document to index in bytes form.
*/
public IndexRequest source(byte[] source, XContentType xContentType) {
- return source(source, 0, source.length, xContentType);
+ sourceContext.source(source, xContentType);
+ return this;
}
/**
@@ -558,7 +526,8 @@ public IndexRequest source(byte[] source, XContentType xContentType) {
* @param length The length of the data
*/
public IndexRequest source(byte[] source, int offset, int length, XContentType xContentType) {
- return source(new BytesArray(source, offset, length), xContentType);
+ sourceContext.source(source, offset, length, xContentType);
+ return this;
}
/**
@@ -769,7 +738,11 @@ private void writeBody(StreamOutput out) throws IOException {
}
out.writeOptionalString(id);
out.writeOptionalString(routing);
- out.writeBytesReference(source);
+ if (out.getTransportVersion().onOrAfter(TransportVersions.SOURCE_CONTEXT)) {
+ sourceContext.writeTo(out);
+ } else {
+ out.writeBytesReference(sourceContext.bytes());
+ }
out.writeByte(opType.getId());
out.writeLong(version);
out.writeByte(versionType.getValue());
@@ -778,11 +751,14 @@ private void writeBody(StreamOutput out) throws IOException {
out.writeBoolean(isPipelineResolved);
out.writeBoolean(isRetry);
out.writeLong(autoGeneratedTimestamp);
- if (contentType != null) {
- out.writeBoolean(true);
- XContentHelper.writeTo(out, contentType);
- } else {
- out.writeBoolean(false);
+ if (out.getTransportVersion().before(TransportVersions.SOURCE_CONTEXT)) {
+ XContentType contentType = sourceContext.contentType();
+ if (contentType != null) {
+ out.writeBoolean(true);
+ XContentHelper.writeTo(out, contentType);
+ } else {
+ out.writeBoolean(false);
+ }
}
out.writeZLong(ifSeqNo);
out.writeVLong(ifPrimaryTerm);
@@ -821,13 +797,13 @@ private void writeBody(StreamOutput out) throws IOException {
public String toString() {
String sSource = "_na_";
try {
- if (source.length() > MAX_SOURCE_LENGTH_IN_TOSTRING) {
+ if (sourceContext.byteLength() > MAX_SOURCE_LENGTH_IN_TOSTRING) {
sSource = "n/a, actual length: ["
- + ByteSizeValue.ofBytes(source.length()).toString()
+ + ByteSizeValue.ofBytes(sourceContext.byteLength()).toString()
+ "], max length: "
+ ByteSizeValue.ofBytes(MAX_SOURCE_LENGTH_IN_TOSTRING).toString();
} else {
- sSource = XContentHelper.convertToJson(source, false);
+ sSource = XContentHelper.convertToJson(sourceContext.bytes(), false);
}
} catch (Exception e) {
// ignore
@@ -862,7 +838,7 @@ public long getAutoGeneratedTimestamp() {
@Override
public long ramBytesUsed() {
- return SHALLOW_SIZE + RamUsageEstimator.sizeOf(id) + (source == null ? 0 : source.length());
+ return SHALLOW_SIZE + RamUsageEstimator.sizeOf(id) + sourceContext.byteLength();
}
@Override
@@ -917,7 +893,7 @@ public Index getConcreteWriteIndex(IndexAbstraction ia, ProjectMetadata project)
@Override
public int route(IndexRouting indexRouting) {
- return indexRouting.indexShard(id, routing, contentType, source);
+ return indexRouting.indexShard(id, routing, sourceContext.contentType(), sourceContext.bytes());
}
public IndexRequest setRequireAlias(boolean requireAlias) {
diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
index 3041e6bf5e274..947d6bf610f49 100644
--- a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
@@ -176,7 +176,7 @@ public IndexRequestBuilder setSource(Object... source) {
*
*/
public IndexRequestBuilder setSource(XContentType xContentType, Object... source) {
- return setSource(IndexRequest.getXContentBuilder(xContentType, source));
+ return setSource(SourceContext.getXContentBuilder(xContentType, source));
}
/**
diff --git a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
index 86edea6f22e3d..3ecc7a4358646 100644
--- a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
+++ b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
@@ -9,23 +9,35 @@
package org.elasticsearch.action.index;
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.client.internal.Requests;
+import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.Releasable;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentFactory;
import org.elasticsearch.xcontent.XContentType;
import java.io.IOException;
import java.util.Map;
+import java.util.Objects;
-public class SourceContext implements Writeable {
+public class SourceContext implements Writeable, Releasable {
private XContentType contentType;
private BytesReference source;
private Releasable sourceReleasable;
+ public SourceContext() {}
+
+ public SourceContext(XContentType contentType, BytesReference source) {
+ this(contentType, source, () -> {});
+ }
+
public SourceContext(XContentType contentType, BytesReference source, Releasable sourceReleasable) {
this.contentType = contentType;
this.source = source;
@@ -53,17 +65,165 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeBytesReference(source);
}
- public XContentType getContentType() {
+ public XContentType contentType() {
return contentType;
}
- public BytesReference getSource() {
+ public BytesReference bytes() {
return source;
}
+ public boolean hasSource() {
+ return source != null;
+ }
+
+ public int byteLength() {
+ return source == null ? 0 : source.length();
+ }
+
+ @Override
+ public void close() {
+ sourceReleasable.close();
+ }
+
public Map<String, Object> sourceAsMap() {
return XContentHelper.convertToMap(source, false, contentType).v2();
}
+ /**
+ * Index the Map in {@link Requests#INDEX_CONTENT_TYPE} format
+ *
+ * @param source The map to index
+ */
+ public void source(Map<String, Object> source) throws ElasticsearchGenerationException {
+ source(source, Requests.INDEX_CONTENT_TYPE);
+ }
+
+ /**
+ * Index the Map as the provided content type.
+ *
+ * @param source The map to index
+ */
+ public void source(Map<String, Object> source, XContentType contentType) throws ElasticsearchGenerationException {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.map(source);
+ source(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ }
+ public void source(Map<String, Object> source, XContentType contentType, boolean ensureNoSelfReferences)
+ throws ElasticsearchGenerationException {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.map(source, ensureNoSelfReferences);
+ source(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ }
+
+ /**
+ * Sets the document source to index.
+ *
+ * Note, it's preferable to either set it using {@link #source(org.elasticsearch.xcontent.XContentBuilder)}
+ * or using the {@link #source(byte[], XContentType)}.
+ */
+ public void source(String source, XContentType xContentType) {
+ source(new BytesArray(source), xContentType);
+ }
+
+ /**
+ * Sets the content source to index.
+ */
+ public void source(XContentBuilder sourceBuilder) {
+ source(BytesReference.bytes(sourceBuilder), sourceBuilder.contentType());
+ }
+
+ /**
+ * Sets the content source to index using the default content type ({@link Requests#INDEX_CONTENT_TYPE})
+ *
+ * Note: the number of objects passed to this method must be an even
+ * number. Also the first argument in each pair (the field name) must have a
+ * valid String representation.
+ *
+ */
+ public void source(Object... source) {
+ source(Requests.INDEX_CONTENT_TYPE, source);
+ }
+
+ /**
+ * Sets the content source to index.
+ *
+ * Note: the number of objects passed to this method as varargs must be an even
+ * number. Also the first argument in each pair (the field name) must have a
+ * valid String representation.
+ *
+ */
+ public void source(XContentType xContentType, Object... source) {
+ source(getXContentBuilder(xContentType, source));
+ }
+
+ /**
+ * Returns an XContentBuilder for the given xContentType and source array
+ *
+ * Note: the number of objects passed to this method as varargs must be an even
+ * number. Also the first argument in each pair (the field name) must have a
+ * valid String representation.
+ *
+ */
+ public static XContentBuilder getXContentBuilder(XContentType xContentType, Object... source) {
+ if (source.length % 2 != 0) {
+ throw new IllegalArgumentException("The number of object passed must be even but was [" + source.length + "]");
+ }
+ if (source.length == 2 && source[0] instanceof BytesReference && source[1] instanceof Boolean) {
+ throw new IllegalArgumentException(
+ "you are using the removed method for source with bytes and unsafe flag, the unsafe flag"
+ + " was removed, please just use source(BytesReference)"
+ );
+ }
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(xContentType);
+ builder.startObject();
+ // This for loop increments by 2 because the source array contains adjacent key/value pairs:
+ for (int i = 0; i < source.length; i = i + 2) {
+ String field = source[i].toString();
+ Object value = source[i + 1];
+ builder.field(field, value);
+ }
+ builder.endObject();
+ return builder;
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate", e);
+ }
+ }
+
+ /**
+ * Sets the document to index in bytes form.
+ */
+ public void source(BytesReference source, XContentType xContentType) {
+ this.source = Objects.requireNonNull(source);
+ this.contentType = Objects.requireNonNull(xContentType);
+ }
+
+ /**
+ * Sets the document to index in bytes form.
+ */
+ public void source(byte[] source, XContentType xContentType) {
+ source(source, 0, source.length, xContentType);
+ }
+
+ /**
+ * Sets the document to index in bytes form (assumed to be safe to be used from different
+ * threads).
+ *
+ * @param source The source to index
+ * @param offset The offset in the byte array
+ * @param length The length of the data
+ */
+ public void source(byte[] source, int offset, int length, XContentType xContentType) {
+ source(new BytesArray(source, offset, length), xContentType);
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
index 39600c7eca661..5cbd673a91683 100644
--- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
+++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
@@ -28,7 +28,6 @@
import org.elasticsearch.index.mapper.RoutingFieldMapper;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.plugins.internal.XContentParserDecorator;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.UpdateCtxMap;
@@ -193,11 +192,7 @@ Result prepareUpdateIndexRequest(IndexShard indexShard, UpdateRequest request, GetResult getResult, boolean detectNoop)
final XContentType updateSourceContentType = sourceAndContent.v1();
final Map<String, Object> updatedSourceAsMap = sourceAndContent.v2();
- final boolean noop = XContentHelper.update(
- updatedSourceAsMap,
- currentRequest.sourceAsMap(XContentParserDecorator.NOOP),
- detectNoop
- ) == false;
+ final boolean noop = XContentHelper.update(updatedSourceAsMap, currentRequest.sourceAsMap(), detectNoop) == false;
// We can only actually turn the update into a noop if detectNoop is true to preserve backwards compatibility and to handle cases
// where users repopulating multi-fields or adding synonyms, etc.
diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
index ae1335008f64a..0ff3572a7a574 100644
--- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
@@ -11,6 +11,7 @@
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.SourceContext;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.WriteRequestBuilder;
@@ -293,7 +294,7 @@ public UpdateRequestBuilder setDoc(Object... source) {
* is a field and value pairs.
*/
public UpdateRequestBuilder setDoc(XContentType xContentType, Object... source) {
- return setDoc(IndexRequest.getXContentBuilder(xContentType, source));
+ return setDoc(SourceContext.getXContentBuilder(xContentType, source));
}
/**
@@ -372,7 +373,7 @@ public UpdateRequestBuilder setUpsert(Object... source) {
* includes field and value pairs.
*/
public UpdateRequestBuilder setUpsert(XContentType xContentType, Object... source) {
- return setUpsert(IndexRequest.getXContentBuilder(xContentType, source));
+ return setUpsert(SourceContext.getXContentBuilder(xContentType, source));
}
/**
diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java
index ea8e9d3843296..db877edd9bf69 100644
--- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java
+++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java
@@ -77,7 +77,6 @@
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.node.ReportingService;
import org.elasticsearch.plugins.IngestPlugin;
-import org.elasticsearch.plugins.internal.XContentParserDecorator;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.Scheduler;
import org.elasticsearch.threadpool.ThreadPool;
@@ -1453,7 +1452,7 @@ private static IngestDocument newIngestDocument(final IndexRequest request) {
request.version(),
request.routing(),
request.versionType(),
- request.sourceAsMap(XContentParserDecorator.NOOP)
+ request.sourceAsMap()
);
}
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java
index e96fda569aa12..36dc512a5286c 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java
@@ -18,6 +18,7 @@
import org.elasticsearch.action.bulk.BulkShardResponse;
import org.elasticsearch.action.bulk.TransportShardBulkAction;
import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.SourceContext;
import org.elasticsearch.action.support.ActionFilterChain;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.update.UpdateRequest;
@@ -573,21 +574,21 @@ public void testIndexingPressure() throws Exception {
inferenceStats
);
- XContentBuilder doc0Source = IndexRequest.getXContentBuilder(XContentType.JSON, "sparse_field", "a test value");
- XContentBuilder doc1Source = IndexRequest.getXContentBuilder(XContentType.JSON, "dense_field", "another test value");
- XContentBuilder doc2Source = IndexRequest.getXContentBuilder(
+ XContentBuilder doc0Source = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", "a test value");
+ XContentBuilder doc1Source = SourceContext.getXContentBuilder(XContentType.JSON, "dense_field", "another test value");
+ XContentBuilder doc2Source = SourceContext.getXContentBuilder(
XContentType.JSON,
"sparse_field",
"a test value",
"dense_field",
"another test value"
);
- XContentBuilder doc3Source = IndexRequest.getXContentBuilder(
+ XContentBuilder doc3Source = SourceContext.getXContentBuilder(
XContentType.JSON,
"dense_field",
List.of("value one", " ", "value two")
);
- XContentBuilder doc4Source = IndexRequest.getXContentBuilder(XContentType.JSON, "sparse_field", " ");
+ XContentBuilder doc4Source = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", " ");
XContentBuilder doc5Source = XContentFactory.contentBuilder(XContentType.JSON);
{
doc5Source.startObject();
@@ -601,8 +602,8 @@ public void testIndexingPressure() throws Exception {
);
doc5Source.endObject();
}
- XContentBuilder doc0UpdateSource = IndexRequest.getXContentBuilder(XContentType.JSON, "sparse_field", "an updated value");
- XContentBuilder doc1UpdateSource = IndexRequest.getXContentBuilder(XContentType.JSON, "dense_field", null);
+ XContentBuilder doc0UpdateSource = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", "an updated value");
+ XContentBuilder doc1UpdateSource = SourceContext.getXContentBuilder(XContentType.JSON, "dense_field", null);
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain<BulkShardRequest, BulkShardResponse> actionFilterChain = (task, action, request, listener) -> {
@@ -692,7 +693,7 @@ public void testIndexingPressureTripsOnInferenceRequestGeneration() throws Exception {
inferenceStats
);
- XContentBuilder doc1Source = IndexRequest.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
+ XContentBuilder doc1Source = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain<BulkShardRequest, BulkShardResponse> actionFilterChain = (task, action, request, listener) -> {
@@ -761,7 +762,7 @@ public void testIndexingPressureTripsOnInferenceRequestGeneration() throws Exception {
@SuppressWarnings("unchecked")
public void testIndexingPressureTripsOnInferenceResponseHandling() throws Exception {
- final XContentBuilder doc1Source = IndexRequest.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
+ final XContentBuilder doc1Source = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
final InstrumentedIndexingPressure indexingPressure = new InstrumentedIndexingPressure(
Settings.builder().put(MAX_COORDINATING_BYTES.getKey(), (length(doc1Source) + 1) + "b").build()
);
@@ -848,8 +849,8 @@ public void testIndexingPressureTripsOnInferenceResponseHandling() throws Exception {
@SuppressWarnings("unchecked")
public void testIndexingPressurePartialFailure() throws Exception {
// Use different length strings so that doc 1 and doc 2 sources are different sizes
- final XContentBuilder doc1Source = IndexRequest.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
- final XContentBuilder doc2Source = IndexRequest.getXContentBuilder(XContentType.JSON, "sparse_field", "bazzz");
+ final XContentBuilder doc1Source = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
+ final XContentBuilder doc2Source = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", "bazzz");
final StaticModel sparseModel = StaticModel.createRandomInstance(TaskType.SPARSE_EMBEDDING);
final ChunkedInferenceEmbedding barEmbedding = randomChunkedInferenceEmbedding(sparseModel, List.of("bar"));
From 8984571262b30723317e5bb99cd108d5eb0de124 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Tue, 2 Sep 2025 16:31:23 -0600
Subject: [PATCH 03/20] remove prod source accessors
---
.../action/bulk/BulkRequest.java | 6 +++---
.../action/bulk/BulkRequestModifier.java | 2 +-
.../action/bulk/BulkShardRequest.java | 19 +++++++++----------
.../action/bulk/TransportShardBulkAction.java | 6 +++---
.../bulk/TransportSimulateBulkAction.java | 4 ++--
.../action/index/IndexRequest.java | 4 ++++
.../action/update/TransportUpdateAction.java | 4 ++--
.../action/update/UpdateRequest.java | 4 ++--
.../cluster/metadata/DataStream.java | 2 +-
.../action/MonitoringBulkRequest.java | 2 +-
.../ShardBulkInferenceActionFilter.java | 13 +++++++++----
.../actions/index/ExecutableIndexAction.java | 2 +-
.../xpack/watcher/history/HistoryStore.java | 2 +-
13 files changed, 39 insertions(+), 31 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
index 93ac6f2a21143..7d98aaba57128 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
@@ -177,7 +177,7 @@ BulkRequest internalAdd(IndexRequest request) {
requests.add(request);
// lack of source is validated in validate() method
- sizeInBytes += (request.source() != null ? request.source().length() : 0) + REQUEST_OVERHEAD;
+ sizeInBytes += request.sourceContext().byteLength() + REQUEST_OVERHEAD;
indices.add(request.index());
return this;
}
@@ -195,10 +195,10 @@ BulkRequest internalAdd(UpdateRequest request) {
requests.add(request);
if (request.doc() != null) {
- sizeInBytes += request.doc().source().length();
+ sizeInBytes += request.doc().sourceContext().byteLength();
}
if (request.upsertRequest() != null) {
- sizeInBytes += request.upsertRequest().source().length();
+ sizeInBytes += request.upsertRequest().sourceContext().byteLength();
}
if (request.script() != null) {
sizeInBytes += request.script().getIdOrCode().length() * 2;
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestModifier.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestModifier.java
index 65b71d4b0b40b..e708c630a00e3 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestModifier.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestModifier.java
@@ -296,7 +296,7 @@ public void markItemForFailureStore(int slot, String targetIndexName, Exception
() -> "Encountered exception while attempting to redirect a failed ingest operation: index ["
+ targetIndexName
+ "], source: ["
- + indexRequest.source().utf8ToString()
+ + indexRequest.sourceContext().bytes().utf8ToString()
+ "]",
ioException
);
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
index 90e5b0b7ccf0d..7a99ff3f8b446 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
@@ -90,13 +90,11 @@ public long totalSizeInBytes() {
for (int i = 0; i < items.length; i++) {
DocWriteRequest<?> request = items[i].request();
if (request instanceof IndexRequest) {
- if (((IndexRequest) request).source() != null) {
- totalSizeInBytes += ((IndexRequest) request).source().length();
- }
+ totalSizeInBytes += ((IndexRequest) request).sourceContext().byteLength();
} else if (request instanceof UpdateRequest) {
IndexRequest doc = ((UpdateRequest) request).doc();
- if (doc != null && doc.source() != null) {
- totalSizeInBytes += ((UpdateRequest) request).doc().source().length();
+ if (doc != null) {
+ totalSizeInBytes += ((UpdateRequest) request).doc().sourceContext().byteLength();
}
}
}
@@ -108,13 +106,14 @@ public long maxOperationSizeInBytes() {
for (int i = 0; i < items.length; i++) {
DocWriteRequest<?> request = items[i].request();
if (request instanceof IndexRequest) {
- if (((IndexRequest) request).source() != null) {
- maxOperationSizeInBytes = Math.max(maxOperationSizeInBytes, ((IndexRequest) request).source().length());
- }
+ maxOperationSizeInBytes = Math.max(maxOperationSizeInBytes, ((IndexRequest) request).sourceContext().byteLength());
} else if (request instanceof UpdateRequest) {
IndexRequest doc = ((UpdateRequest) request).doc();
- if (doc != null && doc.source() != null) {
- maxOperationSizeInBytes = Math.max(maxOperationSizeInBytes, ((UpdateRequest) request).doc().source().length());
+ if (doc != null) {
+ maxOperationSizeInBytes = Math.max(
+ maxOperationSizeInBytes,
+ ((UpdateRequest) request).doc().sourceContext().byteLength()
+ );
}
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
index 01968596db932..8511223017c92 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
@@ -410,7 +410,7 @@ static boolean executeBulkItemRequest(
XContentMeteringParserDecorator meteringParserDecorator = documentParsingProvider.newMeteringParserDecorator(request);
final SourceToParse sourceToParse = new SourceToParse(
request.id(),
- request.source(),
+ request.sourceContext().bytes(),
request.getContentType(),
request.routing(),
request.getDynamicTemplates(),
@@ -609,7 +609,7 @@ private static BulkItemResponse processUpdateResponse(
);
if (updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) {
- final BytesReference indexSourceAsBytes = updateIndexRequest.source();
+ final BytesReference indexSourceAsBytes = updateIndexRequest.sourceContext().bytes();
final Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(
indexSourceAsBytes,
true,
@@ -741,7 +741,7 @@ private static Engine.Result performOpOnReplica(
final IndexRequest indexRequest = (IndexRequest) docWriteRequest;
final SourceToParse sourceToParse = new SourceToParse(
indexRequest.id(),
- indexRequest.source(),
+ indexRequest.sourceContext().bytes(),
indexRequest.getContentType(),
indexRequest.routing()
);
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
index b52f5447b9311..2f622f12d7ccb 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
@@ -159,7 +159,7 @@ protected void doInternalExecute(
request.id(),
request.index(),
request.version(),
- request.source(),
+ request.sourceContext().bytes(),
request.getContentType(),
request.getExecutedPipelines(),
validationResult.ignoredFields,
@@ -201,7 +201,7 @@ private ValidationResult validateMappings(
) {
final SourceToParse sourceToParse = new SourceToParse(
request.id(),
- request.source(),
+ request.sourceContext().bytes(),
request.getContentType(),
request.routing(),
request.getDynamicTemplates(),
diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
index 6a971ec9c430c..6ee922ae6b728 100644
--- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
@@ -414,6 +414,10 @@ public boolean isPipelineResolved() {
return this.isPipelineResolved;
}
+ public SourceContext sourceContext() {
+ return sourceContext;
+ }
+
/**
* The source of the document to index, recopied to a new array if it is unsafe.
*/
diff --git a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
index 617a3c5c49b3e..11de6b9d547e9 100644
--- a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
@@ -226,7 +226,7 @@ protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener) {
case CREATED -> {
IndexRequest upsertRequest = result.action();
// we fetch it from the index request so we don't generate the bytes twice, its already done in the index request
- final BytesReference upsertSourceBytes = upsertRequest.source();
+ final BytesReference upsertSourceBytes = upsertRequest.sourceContext().bytes();
client.bulk(
toSingleItemBulkRequest(upsertRequest),
unwrappingSingleItemBulkResponse(ActionListener.wrap(response -> {
@@ -269,7 +269,7 @@ protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener) {
case UPDATED -> {
IndexRequest indexRequest = result.action();
// we fetch it from the index request so we don't generate the bytes twice, its already done in the index request
- final BytesReference indexSourceBytes = indexRequest.source();
+ final BytesReference indexSourceBytes = indexRequest.sourceContext().bytes();
client.bulk(
toSingleItemBulkRequest(indexRequest),
unwrappingSingleItemBulkResponse(ActionListener.wrap(response -> {
diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
index 657ad029626af..2489d069ea737 100644
--- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
@@ -759,7 +759,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
XContentParser parser = XContentHelper.createParser(
NamedXContentRegistry.EMPTY,
LoggingDeprecationHandler.INSTANCE,
- doc.source(),
+ doc.sourceContext().bytes(),
xContentType
)
) {
@@ -782,7 +782,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
XContentParser parser = XContentHelper.createParser(
NamedXContentRegistry.EMPTY,
LoggingDeprecationHandler.INSTANCE,
- upsertRequest.source(),
+ upsertRequest.sourceContext().bytes(),
xContentType
)
) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
index fddcc838e3a11..d38e2f7588e2a 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
@@ -1732,7 +1732,7 @@ public Index getWriteIndex(IndexRequest request, ProjectMetadata project) {
if (rawTimestamp != null) {
timestamp = getTimeStampFromRaw(rawTimestamp);
} else {
- timestamp = getTimestampFromParser(request.source(), request.getContentType());
+ timestamp = getTimestampFromParser(request.sourceContext().bytes(), request.getContentType());
}
timestamp = getCanonicalTimestampBound(timestamp);
Index result = selectTimeSeriesWriteIndex(timestamp, project);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java
index 4b65bc3350080..504b717c61cd6 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java
@@ -100,7 +100,7 @@ public MonitoringBulkRequest add(
if (MonitoringIndex.from(indexRequest.index()) != MonitoringIndex.TIMESTAMPED) {
return;
}
- final BytesReference source = indexRequest.source();
+ final BytesReference source = indexRequest.sourceContext().bytes();
if (source.length() == 0) {
throw new IllegalArgumentException(
"source is missing for monitoring document [" + indexRequest.index() + "][" + type + "][" + indexRequest.id() + "]"
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
index 5b73ba6dbcbdf..07ad13c72bcb8 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
@@ -629,7 +629,7 @@ private boolean incrementIndexingPressure(IndexRequestWithIndexingPressure index
if (indexRequest.isIndexingPressureIncremented() == false) {
try {
// Track operation count as one operation per document source update
- coordinatingIndexingPressure.increment(1, indexRequest.getIndexRequest().source().length());
+ coordinatingIndexingPressure.increment(1, indexRequest.getIndexRequest().sourceContext().bytes().length());
indexRequest.setIndexingPressureIncremented();
} catch (EsRejectedExecutionException e) {
addInferenceResponseFailure(
@@ -724,7 +724,7 @@ private void applyInferenceResponses(BulkItemRequest item, FieldInferenceRespons
inferenceFieldsMap.put(fieldName, result);
}
- BytesReference originalSource = indexRequest.source();
+ BytesReference originalSource = indexRequest.sourceContext().bytes();
if (useLegacyFormat) {
var newDocMap = indexRequest.sourceAsMap();
for (var entry : inferenceFieldsMap.entrySet()) {
@@ -733,11 +733,16 @@ private void applyInferenceResponses(BulkItemRequest item, FieldInferenceRespons
indexRequest.source(newDocMap, indexRequest.getContentType());
} else {
try (XContentBuilder builder = XContentBuilder.builder(indexRequest.getContentType().xContent())) {
- appendSourceAndInferenceMetadata(builder, indexRequest.source(), indexRequest.getContentType(), inferenceFieldsMap);
+ appendSourceAndInferenceMetadata(
+ builder,
+ indexRequest.sourceContext().bytes(),
+ indexRequest.getContentType(),
+ inferenceFieldsMap
+ );
indexRequest.source(builder);
}
}
- long modifiedSourceSize = indexRequest.source().length();
+ long modifiedSourceSize = indexRequest.sourceContext().bytes().length();
// Add the indexing pressure from the source modifications.
// Don't increment operation count because we count one source update as one operation, and we already accounted for those
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java
index b385298c09062..ef4282c744c94 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java
@@ -110,7 +110,7 @@ public Action.Result execute(String actionId, WatchExecutionContext ctx, Payload payload) throws Exception {
indexRequest.index(),
indexRequest.id(),
action.refreshPolicy,
- new XContentSource(indexRequest.source(), XContentType.JSON)
+ new XContentSource(indexRequest.sourceContext().bytes(), XContentType.JSON)
);
}
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java
index 98bf3e7ab40a4..78d56fc5c3e2f 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java
@@ -56,7 +56,7 @@ public void put(WatchRecord watchRecord) throws Exception {
watchRecord.toXContent(builder, WatcherParams.HIDE_SECRETS);
IndexRequest request = new IndexRequest(HistoryStoreField.DATA_STREAM).id(watchRecord.id().value()).source(builder);
request.opType(IndexRequest.OpType.CREATE);
- if (request.source().length() > maxHistoryRecordSize.getBytes()) {
+ if (request.sourceContext().bytes().length() > maxHistoryRecordSize.getBytes()) {
WatchRecord redactedWatchRecord = watchRecord.dropLargeFields();
try (XContentBuilder redactedBuilder = XContentFactory.jsonBuilder()) {
redactedWatchRecord.toXContent(redactedBuilder, WatcherParams.HIDE_SECRETS);
From ffb233bf2932b9d384cc051d76b08c5d23c5eedf Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Tue, 2 Sep 2025 20:47:51 -0600
Subject: [PATCH 04/20] Support releasable source bytes in SourceContext
---
.../action/index/SourceContext.java | 34 ++++++++++++++-----
.../LimitAwareBulkIndexerTests.java | 2 ++
2 files changed, 27 insertions(+), 9 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
index 3ecc7a4358646..7156e92c82d84 100644
--- a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
+++ b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
@@ -13,6 +13,7 @@
import org.elasticsearch.client.internal.Requests;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@@ -24,10 +25,11 @@
import java.io.IOException;
import java.util.Map;
-import java.util.Objects;
public class SourceContext implements Writeable, Releasable {
+ private static final Releasable NOOP_RELEASABLE = () -> {};
+
private XContentType contentType;
private BytesReference source;
private Releasable sourceReleasable;
@@ -35,7 +37,7 @@ public class SourceContext implements Writeable, Releasable {
public SourceContext() {}
public SourceContext(XContentType contentType, BytesReference source) {
- this(contentType, source, () -> {});
+ this(contentType, source, NOOP_RELEASABLE);
}
public SourceContext(XContentType contentType, BytesReference source, Releasable sourceReleasable) {
@@ -200,19 +202,22 @@ public static XContentBuilder getXContentBuilder(XContentType xContentType, Obje
}
}
+ public void source(ReleasableBytesReference source, XContentType contentType) {
+ setSource(source, contentType, source);
+ }
+
/**
* Sets the document to index in bytes form.
*/
- public void source(BytesReference source, XContentType xContentType) {
- this.source = Objects.requireNonNull(source);
- this.contentType = Objects.requireNonNull(xContentType);
+ public void source(BytesReference source, XContentType contentType) {
+ setSource(source, contentType);
}
/**
* Sets the document to index in bytes form.
*/
- public void source(byte[] source, XContentType xContentType) {
- source(source, 0, source.length, xContentType);
+ public void source(byte[] source, XContentType contentType) {
+ source(source, 0, source.length, contentType);
}
/**
@@ -223,7 +228,18 @@ public void source(byte[] source, XContentType xContentType) {
* @param offset The offset in the byte array
* @param length The length of the data
*/
- public void source(byte[] source, int offset, int length, XContentType xContentType) {
- source(new BytesArray(source, offset, length), xContentType);
+ public void source(byte[] source, int offset, int length, XContentType contentType) {
+ source(new BytesArray(source, offset, length), contentType);
+ }
+
+ private void setSource(BytesReference source, XContentType contentType) {
+ setSource(source, contentType, NOOP_RELEASABLE);
+ }
+
+ private void setSource(BytesReference source, XContentType contentType, Releasable sourceReleasable) {
+ this.source = source;
+ this.contentType = contentType;
+ this.sourceReleasable.close();
+ this.sourceReleasable = sourceReleasable;
}
}
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/LimitAwareBulkIndexerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/LimitAwareBulkIndexerTests.java
index e966e88bbd60b..b0a14611de221 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/LimitAwareBulkIndexerTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/LimitAwareBulkIndexerTests.java
@@ -9,6 +9,7 @@
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.SourceContext;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
@@ -79,6 +80,7 @@ private LimitAwareBulkIndexer createIndexer(long bytesLimit) {
private static IndexRequest mockIndexRequest(long ramBytes) {
IndexRequest indexRequest = mock(IndexRequest.class);
when(indexRequest.ramBytesUsed()).thenReturn(ramBytes);
+ when(indexRequest.sourceContext()).thenReturn(new SourceContext());
return indexRequest;
}
}
From 418638f43d23b9e1db3b0a5ee8472d06b2d1d0d1 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Tue, 2 Sep 2025 22:38:08 -0600
Subject: [PATCH 05/20] Fix null releasable handling in SourceContext.setSource
---
.../java/org/elasticsearch/action/index/SourceContext.java | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
index 7156e92c82d84..bbcbb791e0692 100644
--- a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
+++ b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
@@ -19,6 +19,7 @@
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.Releasable;
+import org.elasticsearch.core.Releasables;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentFactory;
import org.elasticsearch.xcontent.XContentType;
@@ -239,7 +240,7 @@ private void setSource(BytesReference source, XContentType contentType) {
private void setSource(BytesReference source, XContentType contentType, Releasable sourceReleasable) {
this.source = source;
this.contentType = contentType;
- this.sourceReleasable.close();
+ Releasables.close(sourceReleasable);
this.sourceReleasable = sourceReleasable;
}
}
From 812e7e0a64048da37157dee26024f9b020e31f2a Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Wed, 3 Sep 2025 14:53:22 -0600
Subject: [PATCH 06/20] Parse bulk document lines as retained releasable slices
---
.../action/bulk/BulkRequestParser.java | 23 +++++++------
.../action/index/IndexRequest.java | 15 ++++-----
.../action/index/SourceContext.java | 2 +-
.../rest/action/document/RestBulkAction.java | 33 ++++++++++++-------
.../action/bulk/BulkRequestParserTests.java | 20 ++++++-----
5 files changed, 54 insertions(+), 39 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java
index 2f336566953ba..234ad7b89af2e 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java
@@ -14,6 +14,7 @@
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
@@ -107,11 +108,12 @@ private static int findNextMarker(byte marker, int from, BytesReference data, bo
* Returns the sliced {@link BytesReference}. If the {@link XContentType} is JSON, the byte preceding the marker is checked to see
* if it is a carriage return and if so, the BytesReference is sliced so that the carriage return is ignored
*/
- private static BytesReference sliceTrimmingCarriageReturn(
- BytesReference bytesReference,
+ private static ReleasableBytesReference sliceTrimmingCarriageReturn(
+ ReleasableBytesReference bytesReference,
int from,
int nextMarker,
- XContentType xContentType
+ XContentType xContentType,
+ boolean retain
) {
final int length;
if (XContentType.JSON == xContentType && bytesReference.get(nextMarker - 1) == (byte) '\r') {
@@ -119,7 +121,7 @@ private static BytesReference sliceTrimmingCarriageReturn(
} else {
length = nextMarker - from;
}
- return bytesReference.slice(from, length);
+ return retain ? bytesReference.retainedSlice(from, length) : bytesReference.slice(from, length);
}
/**
@@ -157,7 +159,7 @@ public void parse(
deleteRequestConsumer
);
- incrementalParser.parse(data, true);
+ incrementalParser.parse(ReleasableBytesReference.wrap(data), true);
}
public IncrementalParser incrementalParser(
@@ -251,7 +253,7 @@ private IncrementalParser(
this.deleteRequestConsumer = deleteRequestConsumer;
}
- public int parse(BytesReference data, boolean lastData) throws IOException {
+ public int parse(ReleasableBytesReference data, boolean lastData) throws IOException {
if (failure != null) {
assert false : failure.getMessage();
throw new IllegalStateException("Parser has already encountered exception", failure);
@@ -264,7 +266,7 @@ public int parse(BytesReference data, boolean lastData) throws IOException {
}
}
- private int tryParse(BytesReference data, boolean lastData) throws IOException {
+ private int tryParse(ReleasableBytesReference data, boolean lastData) throws IOException {
int from = 0;
int consumed = 0;
@@ -523,16 +525,17 @@ private boolean parseActionLine(BytesReference data, int from, int to) throws IO
return true;
}
- private void parseAndConsumeDocumentLine(BytesReference data, int from, int to) throws IOException {
+ private void parseAndConsumeDocumentLine(ReleasableBytesReference data, int from, int to) throws IOException {
assert currentRequest != null && currentRequest instanceof DeleteRequest == false;
if (currentRequest instanceof IndexRequest indexRequest) {
- indexRequest.source(sliceTrimmingCarriageReturn(data, from, to, xContentType), xContentType);
+ ReleasableBytesReference indexSource = sliceTrimmingCarriageReturn(data, from, to, xContentType, true);
+ indexRequest.sourceContext().source(indexSource, xContentType);
indexRequestConsumer.accept(indexRequest, currentType);
} else if (currentRequest instanceof UpdateRequest updateRequest) {
try (
XContentParser sliceParser = createParser(
xContentType.xContent(),
- sliceTrimmingCarriageReturn(data, from, to, xContentType)
+ sliceTrimmingCarriageReturn(data, from, to, xContentType, false)
)
) {
updateRequest.fromXContent(sliceParser);
diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
index 6ee922ae6b728..4d0532471dc89 100644
--- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
@@ -95,7 +95,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
@Nullable
private String routing;
- private SourceContext sourceContext = new SourceContext();
+ private final SourceContext sourceContext;
private OpType opType = OpType.INDEX;
@@ -162,10 +162,11 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio
routing = in.readOptionalString();
boolean beforeSourceContext = in.getTransportVersion().before(TransportVersions.SOURCE_CONTEXT);
BytesReference source;
+ SourceContext localSourceContext = null;
if (beforeSourceContext) {
source = in.readBytesReference();
} else {
- sourceContext = new SourceContext(in);
+ localSourceContext = new SourceContext(in);
source = null;
}
opType = OpType.fromId(in.readByte());
@@ -184,8 +185,9 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio
} else {
contentType = null;
}
- sourceContext = new SourceContext(contentType, source, () -> {});
+ localSourceContext = new SourceContext(contentType, source);
}
+ sourceContext = Objects.requireNonNull(localSourceContext);
ifSeqNo = in.readZLong();
ifPrimaryTerm = in.readVLong();
requireAlias = in.readBoolean();
@@ -226,6 +228,7 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio
public IndexRequest() {
super(NO_SHARD_ID);
+ this.sourceContext = new SourceContext();
}
/**
@@ -235,6 +238,7 @@ public IndexRequest() {
public IndexRequest(String index) {
super(NO_SHARD_ID);
this.index = index;
+ this.sourceContext = new SourceContext();
}
private static final StringLiteralDeduplicator pipelineNameDeduplicator = new StringLiteralDeduplicator();
@@ -500,11 +504,6 @@ public IndexRequest source(XContentType xContentType, Object... source) {
return this;
}
- public IndexRequest sourceContext(SourceContext sourceContext) {
- sourceContext = Objects.requireNonNull(sourceContext);
- return this;
- }
-
/**
* Sets the document to index in bytes form.
*/
diff --git a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
index bbcbb791e0692..974062b435d40 100644
--- a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
+++ b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
@@ -86,7 +86,7 @@ public int byteLength() {
@Override
public void close() {
- sourceReleasable.close();
+ Releasables.close(sourceReleasable);
}
public Map<String, Object> sourceAsMap() {
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
index 20a9fb7ed23aa..97720c6fceabb 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
@@ -16,14 +16,14 @@
import org.elasticsearch.action.bulk.BulkRequestParser;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.bulk.IncrementalBulkService;
+import org.elasticsearch.action.bulk.TransportBulkAction;
+import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.client.internal.node.NodeClient;
-import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.CompositeBytesReference;
import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.core.Releasable;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.rest.BaseRestHandler;
@@ -217,7 +217,7 @@ public void handleChunk(RestChannel channel, ReleasableBytesReference chunk, boo
return;
}
- final BytesReference data;
+ final ReleasableBytesReference data;
int bytesConsumed;
if (chunk.length() == 0) {
chunk.close();
@@ -228,7 +228,8 @@ public void handleChunk(RestChannel channel, ReleasableBytesReference chunk, boo
unParsedChunks.add(chunk);
if (unParsedChunks.size() > 1) {
- data = CompositeBytesReference.of(unParsedChunks.toArray(new ReleasableBytesReference[0]));
+ ReleasableBytesReference[] components = unParsedChunks.toArray(new ReleasableBytesReference[0]);
+ data = new ReleasableBytesReference(CompositeBytesReference.of(components), () -> Releasables.close(components));
} else {
data = chunk;
}
@@ -243,7 +244,8 @@ public void handleChunk(RestChannel channel, ReleasableBytesReference chunk, boo
}
}
- final ArrayList<Releasable> releasables = accountParsing(bytesConsumed);
+ releaseConsumeBytes(bytesConsumed);
+
if (isLast) {
assert unParsedChunks.isEmpty();
if (handler.getIncrementalOperation().totalParsedBytes() == 0) {
@@ -253,24 +255,33 @@ public void handleChunk(RestChannel channel, ReleasableBytesReference chunk, boo
assert channel != null;
ArrayList<DocWriteRequest<?>> toPass = new ArrayList<>(items);
items.clear();
- handler.lastItems(toPass, () -> Releasables.close(releasables), new RestRefCountedChunkedToXContentListener<>(channel));
+ handler.lastItems(toPass, () -> closeSourceContexts(toPass), new RestRefCountedChunkedToXContentListener<>(channel));
}
handler.updateWaitForChunkMetrics(TimeUnit.NANOSECONDS.toMillis(totalChunkWaitTimeInNanos));
totalChunkWaitTimeInNanos = 0L;
} else if (items.isEmpty() == false) {
ArrayList<DocWriteRequest<?>> toPass = new ArrayList<>(items);
items.clear();
- handler.addItems(toPass, () -> Releasables.close(releasables), () -> {
+ handler.addItems(toPass, () -> closeSourceContexts(toPass), () -> {
requestNextChunkTime = System.nanoTime();
request.contentStream().next();
});
} else {
- Releasables.close(releasables);
requestNextChunkTime = System.nanoTime();
request.contentStream().next();
}
}
+ private static void closeSourceContexts(ArrayList<DocWriteRequest<?>> requests) {
+ // We only slice for index requests currently.
+ for (DocWriteRequest<?> request : requests) {
+ IndexRequest indexRequest = TransportBulkAction.getIndexWriteRequest(request);
+ if (indexRequest != null) {
+ indexRequest.sourceContext().close();
+ }
+ }
+ }
+
@Override
public void streamClose() {
assert Transports.assertTransportThread();
@@ -286,19 +297,17 @@ private void shortCircuit() {
unParsedChunks.clear();
}
- private ArrayList<Releasable> accountParsing(int bytesConsumed) {
- ArrayList<Releasable> releasables = new ArrayList<>(unParsedChunks.size());
+ private void releaseConsumeBytes(int bytesConsumed) {
while (bytesConsumed > 0) {
ReleasableBytesReference reference = unParsedChunks.removeFirst();
- releasables.add(reference);
if (bytesConsumed >= reference.length()) {
bytesConsumed -= reference.length();
} else {
unParsedChunks.addFirst(reference.retainedSlice(bytesConsumed, reference.length() - bytesConsumed));
bytesConsumed = 0;
}
+ reference.close();
}
- return releasables;
}
}
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java
index de2dc8f9ea0b5..bad567dab3537 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java
@@ -12,6 +12,7 @@
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.core.RestApiVersion;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.test.ESTestCase;
@@ -34,10 +35,10 @@ public class BulkRequestParserTests extends ESTestCase {
.toList();
public void testParserCannotBeReusedAfterFailure() {
- BytesArray request = new BytesArray("""
+ ReleasableBytesReference request = ReleasableBytesReference.wrap(new BytesArray("""
{ "index":{ }, "something": "unexpected" }
{}
- """);
+ """));
BulkRequestParser parser = new BulkRequestParser(randomBoolean(), true, RestApiVersion.current());
BulkRequestParser.IncrementalParser incrementalParser = parser.incrementalParser(
@@ -58,10 +59,10 @@ public void testParserCannotBeReusedAfterFailure() {
IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> incrementalParser.parse(request, false));
assertEquals("Malformed action/metadata line [1], expected END_OBJECT but found [FIELD_NAME]", ex.getMessage());
- BytesArray valid = new BytesArray("""
+ ReleasableBytesReference valid = ReleasableBytesReference.wrap(new BytesArray("""
{ "index":{ "_id": "bar" } }
{}
- """);
+ """));
expectThrows(AssertionError.class, () -> incrementalParser.parse(valid, false));
}
@@ -86,7 +87,7 @@ public void testIncrementalParsing() throws IOException {
deleteRequests::add
);
- BytesArray request = new BytesArray("""
+ ReleasableBytesReference request = ReleasableBytesReference.wrap(new BytesArray("""
{ "index":{ "_id": "bar", "pipeline": "foo" } }
{ "field": "value"}
{ "index":{ "require_alias": false } }
@@ -97,7 +98,7 @@ public void testIncrementalParsing() throws IOException {
{ "index": { } }
{ "field": "value"}
{ "delete":{ "_id": "bop" } }
- """);
+ """));
int consumed = 0;
for (int i = 0; i < request.length() - 1; ++i) {
@@ -255,9 +256,12 @@ public void testBarfOnLackOfTrailingNewline() throws IOException {
);
// Should not throw because not last
- incrementalParser.parse(request, false);
+ incrementalParser.parse(ReleasableBytesReference.wrap(request), false);
- IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, () -> incrementalParser.parse(request, true));
+ IllegalArgumentException e2 = expectThrows(
+ IllegalArgumentException.class,
+ () -> incrementalParser.parse(ReleasableBytesReference.wrap(request), true)
+ );
assertEquals("The bulk request must be terminated by a newline [\\n]", e2.getMessage());
}
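A note on the calling convention the tests above adopt: ReleasableBytesReference.wrap adapts a plain, heap-backed BytesReference to the parser's new parse(ReleasableBytesReference, boolean) signature without attaching any real deallocation work. A minimal sketch (hypothetical helper name):

    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.common.bytes.ReleasableBytesReference;

    static ReleasableBytesReference wrapBulkBody(String body) {
        // Closing the wrapper is a no-op for the underlying heap bytes, so test
        // fixtures can be parsed and released without reference-count bookkeeping.
        return ReleasableBytesReference.wrap(new BytesArray(body));
    }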
From 5b7b7d5e91f72d676a64e7a07418a8df9f0a73d5 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Wed, 3 Sep 2025 16:29:32 -0600
Subject: [PATCH 07/20] Change
---
.../org/elasticsearch/http/BulkRestIT.java | 45 ++++++++++------
.../action/bulk/BulkRequestParser.java | 10 ++--
.../action/index/SourceContext.java | 6 ++-
.../rest/action/document/RestBulkAction.java | 51 +++++++++++--------
4 files changed, 72 insertions(+), 40 deletions(-)
diff --git a/qa/smoke-test-http/src/internalClusterTest/java/org/elasticsearch/http/BulkRestIT.java b/qa/smoke-test-http/src/internalClusterTest/java/org/elasticsearch/http/BulkRestIT.java
index 3faa88339f0a3..4d8cb82a03cb0 100644
--- a/qa/smoke-test-http/src/internalClusterTest/java/org/elasticsearch/http/BulkRestIT.java
+++ b/qa/smoke-test-http/src/internalClusterTest/java/org/elasticsearch/http/BulkRestIT.java
@@ -52,11 +52,7 @@ public void testBulkUriMatchingDoesNotMatchBulkCapabilitiesApi() throws IOExcept
}
public void testBulkMissingBody() throws IOException {
- Request request = new Request(randomBoolean() ? "POST" : "PUT", "/_bulk");
- request.setJsonEntity("");
- ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request));
- assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode());
- assertThat(responseException.getMessage(), containsString("request body is required"));
+ sendMissingBody();
}
public void testBulkInvalidIndexNameString() throws IOException {
@@ -79,16 +75,7 @@ public void testBulkInvalidIndexNameString() throws IOException {
}
public void testBulkRequestBodyImproperlyTerminated() throws IOException {
- Request request = new Request(randomBoolean() ? "POST" : "PUT", "/_bulk");
- // missing final line of the bulk body. cannot process
- request.setJsonEntity(
- "{\"index\":{\"_index\":\"index_name\",\"_id\":\"1\"}}\n"
- + "{\"field\":1}\n"
- + "{\"index\":{\"_index\":\"index_name\",\"_id\":\"2\"}"
- );
- ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request));
- assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode());
- assertThat(responseException.getMessage(), containsString("The bulk request must be terminated by a newline"));
+ sendImproperlyTerminated();
}
public void testBulkRequest() throws IOException {
@@ -156,6 +143,9 @@ public void testBulkWithIncrementalDisabled() throws IOException {
try {
sendLargeBulk();
+ sendMalFormedActionLine();
+ sendImproperlyTerminated();
+ sendMissingBody();
} finally {
internalCluster().getInstances(IncrementalBulkService.class).forEach(i -> i.setForTests(true));
updateClusterSettings(Settings.builder().put(IncrementalBulkService.INCREMENTAL_BULK.getKey(), (String) null));
@@ -177,6 +167,31 @@ public void testMalformedActionLineBulk() throws IOException {
final Response indexCreatedResponse = getRestClient().performRequest(createRequest);
assertThat(indexCreatedResponse.getStatusLine().getStatusCode(), equalTo(OK.getStatus()));
+ sendMalFormedActionLine();
+ }
+
+ private static void sendMissingBody() {
+ Request request = new Request(randomBoolean() ? "POST" : "PUT", "/_bulk");
+ request.setJsonEntity("");
+ ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request));
+ assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode());
+ assertThat(responseException.getMessage(), containsString("request body is required"));
+ }
+
+ private static void sendImproperlyTerminated() {
+ Request request = new Request(randomBoolean() ? "POST" : "PUT", "/_bulk");
+ // missing final line of the bulk body. cannot process
+ request.setJsonEntity(
+ "{\"index\":{\"_index\":\"index_name\",\"_id\":\"1\"}}\n"
+ + "{\"field\":1}\n"
+ + "{\"index\":{\"_index\":\"index_name\",\"_id\":\"2\"}"
+ );
+ ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request));
+ assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode());
+ assertThat(responseException.getMessage(), containsString("The bulk request must be terminated by a newline"));
+ }
+
+ private static void sendMalFormedActionLine() throws IOException {
Request bulkRequest = new Request("POST", "/index_name/_bulk");
final StringBuilder bulk = new StringBuilder();
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java
index 234ad7b89af2e..fc69bc1087153 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java
@@ -159,7 +159,12 @@ public void parse(
deleteRequestConsumer
);
- incrementalParser.parse(ReleasableBytesReference.wrap(data), true);
+ incrementalParser.parse(
+ data instanceof ReleasableBytesReference releasableBytesReference
+ ? releasableBytesReference
+ : ReleasableBytesReference.wrap(data),
+ true
+ );
}
public IncrementalParser incrementalParser(
@@ -528,8 +533,7 @@ private boolean parseActionLine(BytesReference data, int from, int to) throws IO
private void parseAndConsumeDocumentLine(ReleasableBytesReference data, int from, int to) throws IOException {
assert currentRequest != null && currentRequest instanceof DeleteRequest == false;
if (currentRequest instanceof IndexRequest indexRequest) {
- ReleasableBytesReference indexSource = sliceTrimmingCarriageReturn(data, from, to, xContentType, true);
- indexRequest.sourceContext().source(indexSource, xContentType);
+ indexRequest.sourceContext().source(sliceTrimmingCarriageReturn(data, from, to, xContentType, true), xContentType);
indexRequestConsumer.accept(indexRequest, currentType);
} else if (currentRequest instanceof UpdateRequest updateRequest) {
try (
diff --git a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
index 974062b435d40..2e55457ca5752 100644
--- a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
+++ b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
@@ -34,6 +34,7 @@ public class SourceContext implements Writeable, Releasable {
private XContentType contentType;
private BytesReference source;
private Releasable sourceReleasable;
+ private boolean isClosed = false;
public SourceContext() {}
@@ -86,6 +87,8 @@ public int byteLength() {
@Override
public void close() {
+ assert isClosed == false;
+ isClosed = true;
Releasables.close(sourceReleasable);
}
@@ -240,7 +243,8 @@ private void setSource(BytesReference source, XContentType contentType) {
private void setSource(BytesReference source, XContentType contentType, Releasable sourceReleasable) {
this.source = source;
this.contentType = contentType;
- Releasables.close(sourceReleasable);
+ Releasable toClose = this.sourceReleasable;
this.sourceReleasable = sourceReleasable;
+ Releasables.close(toClose);
}
}
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
index 97720c6fceabb..4a10c120ff796 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
@@ -113,11 +113,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
bulkRequest.setRefreshPolicy(request.param("refresh"));
bulkRequest.includeSourceOnError(RestUtils.getIncludeSourceOnError(request));
bulkRequest.requestParamsUsed(request.params().keySet());
- ReleasableBytesReference content = request.requiredContent();
try {
bulkRequest.add(
- content,
+ request.requiredContent(),
defaultIndex,
defaultRouting,
defaultFetchSourceContext,
@@ -130,12 +129,16 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
request.getRestApiVersion()
);
} catch (Exception e) {
+ closeSourceContexts(bulkRequest.requests());
return channel -> new RestToXContentListener<>(channel).onFailure(parseFailureException(e));
}
- return channel -> {
- content.mustIncRef();
- client.bulk(bulkRequest, ActionListener.releaseAfter(new RestRefCountedChunkedToXContentListener<>(channel), content));
- };
+
+ // The request list is mutable, so take a copy to close later.
+ List<DocWriteRequest<?>> toClose = new ArrayList<>(bulkRequest.requests());
+ return channel -> client.bulk(
+ bulkRequest,
+ ActionListener.releaseAfter(new RestRefCountedChunkedToXContentListener<>(channel), () -> closeSourceContexts(toClose))
+ );
} else {
request.ensureContent();
String waitForActiveShards = request.param("wait_for_active_shards");
@@ -217,26 +220,30 @@ public void handleChunk(RestChannel channel, ReleasableBytesReference chunk, boo
return;
}
- final ReleasableBytesReference data;
int bytesConsumed;
if (chunk.length() == 0) {
chunk.close();
bytesConsumed = 0;
} else {
+ final ReleasableBytesReference data;
try {
handler.getIncrementalOperation().incrementUnparsedBytes(chunk.length());
unParsedChunks.add(chunk);
if (unParsedChunks.size() > 1) {
ReleasableBytesReference[] components = unParsedChunks.toArray(new ReleasableBytesReference[0]);
+ for (ReleasableBytesReference reference : components) {
+ reference.incRef();
+ }
data = new ReleasableBytesReference(CompositeBytesReference.of(components), () -> Releasables.close(components));
} else {
- data = chunk;
+ data = chunk.retain();
}
- bytesConsumed = parser.parse(data, isLast);
- handler.getIncrementalOperation().transferUnparsedBytesToParsed(bytesConsumed);
-
+ try (ReleasableBytesReference toClose = data) {
+ bytesConsumed = parser.parse(data, isLast);
+ handler.getIncrementalOperation().transferUnparsedBytesToParsed(bytesConsumed);
+ }
} catch (Exception e) {
shortCircuit();
new RestToXContentListener<>(channel).onFailure(parseFailureException(e));
@@ -272,16 +279,6 @@ public void handleChunk(RestChannel channel, ReleasableBytesReference chunk, boo
}
}
- private static void closeSourceContexts(ArrayList<DocWriteRequest<?>> requests) {
- // We only slice for index requests currently.
- for (DocWriteRequest<?> request : requests) {
- IndexRequest indexRequest = TransportBulkAction.getIndexWriteRequest(request);
- if (indexRequest != null) {
- indexRequest.sourceContext().close();
- }
- }
- }
-
@Override
public void streamClose() {
assert Transports.assertTransportThread();
@@ -294,6 +291,8 @@ private void shortCircuit() {
shortCircuited = true;
Releasables.close(handler);
Releasables.close(unParsedChunks);
+ closeSourceContexts(items);
+ items.clear();
unParsedChunks.clear();
}
@@ -309,6 +308,16 @@ private void releaseConsumeBytes(int bytesConsumed) {
reference.close();
}
}
+
+ }
+
+ private static void closeSourceContexts(List<DocWriteRequest<?>> requests) {
+ for (DocWriteRequest<?> request : requests) {
+ IndexRequest indexRequest = TransportBulkAction.getIndexWriteRequest(request);
+ if (indexRequest != null) {
+ indexRequest.sourceContext().close();
+ }
+ }
}
@Override
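One detail of the chunk handling above is worth spelling out: when several unparsed chunks are combined, each component is retained on behalf of the composite, and the composite's own releasable hands all of those references back at once, so the composite can be closed independently of the per-chunk accounting in releaseConsumeBytes. A condensed sketch of that ownership transfer (helper name is illustrative):

    import org.elasticsearch.common.bytes.CompositeBytesReference;
    import org.elasticsearch.common.bytes.ReleasableBytesReference;
    import org.elasticsearch.core.Releasables;

    static ReleasableBytesReference combine(ReleasableBytesReference[] components) {
        // Take one extra reference per chunk so the composite owns its inputs...
        for (ReleasableBytesReference reference : components) {
            reference.incRef();
        }
        // ...and return them all together when the composite itself is closed.
        return new ReleasableBytesReference(CompositeBytesReference.of(components), () -> Releasables.close(components));
    }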
From 55853cd1d88df704bb17c8e5d0325449a651d213 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Wed, 3 Sep 2025 19:04:41 -0600
Subject: [PATCH 08/20] Change
---
.../action/index/SourceContext.java | 31 ++++-----
.../ShardBulkInferenceActionFilter.java | 68 ++++++++++---------
2 files changed, 49 insertions(+), 50 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
index 2e55457ca5752..619ff559d6808 100644
--- a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
+++ b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
@@ -29,23 +29,15 @@
public class SourceContext implements Writeable, Releasable {
- private static final Releasable NOOP_RELEASABLE = () -> {};
-
private XContentType contentType;
- private BytesReference source;
- private Releasable sourceReleasable;
+ private ReleasableBytesReference source;
private boolean isClosed = false;
public SourceContext() {}
public SourceContext(XContentType contentType, BytesReference source) {
- this(contentType, source, NOOP_RELEASABLE);
- }
-
- public SourceContext(XContentType contentType, BytesReference source, Releasable sourceReleasable) {
this.contentType = contentType;
- this.source = source;
- this.sourceReleasable = sourceReleasable;
+ this.source = ReleasableBytesReference.wrap(source);
}
public SourceContext(StreamInput in) throws IOException {
@@ -55,7 +47,7 @@ public SourceContext(StreamInput in) throws IOException {
} else {
contentType = null;
}
- source = in.readBytesReference();
+ source = ReleasableBytesReference.wrap(in.readBytesReference());
}
@Override
@@ -77,6 +69,10 @@ public BytesReference bytes() {
return source;
}
+ public ReleasableBytesReference retainedBytes() {
+ return source.retain();
+ }
+
public boolean hasSource() {
return source != null;
}
@@ -89,7 +85,9 @@ public int byteLength() {
public void close() {
assert isClosed == false;
isClosed = true;
- Releasables.close(sourceReleasable);
+ Releasables.close(source);
+ source = null;
+ contentType = null;
}
public Map<String, Object> sourceAsMap() {
@@ -207,7 +205,7 @@ public static XContentBuilder getXContentBuilder(XContentType xContentType, Obje
}
public void source(ReleasableBytesReference source, XContentType contentType) {
- setSource(source, contentType, source);
+ setSource(source, contentType);
}
/**
@@ -237,14 +235,13 @@ public void source(byte[] source, int offset, int length, XContentType contentTy
}
private void setSource(BytesReference source, XContentType contentType) {
- setSource(source, contentType, NOOP_RELEASABLE);
+ setSource(ReleasableBytesReference.wrap(source), contentType);
}
- private void setSource(BytesReference source, XContentType contentType, Releasable sourceReleasable) {
+ private void setSource(ReleasableBytesReference source, XContentType contentType) {
+ Releasable toClose = this.source;
this.source = source;
this.contentType = contentType;
- Releasable toClose = this.sourceReleasable;
- this.sourceReleasable = sourceReleasable;
Releasables.close(toClose);
}
}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
index 07ad13c72bcb8..fc200e8dbcf79 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
@@ -26,6 +26,7 @@
import org.elasticsearch.cluster.metadata.ProjectMetadata;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.concurrent.AtomicArray;
@@ -724,43 +725,44 @@ private void applyInferenceResponses(BulkItemRequest item, FieldInferenceRespons
inferenceFieldsMap.put(fieldName, result);
}
- BytesReference originalSource = indexRequest.sourceContext().bytes();
- if (useLegacyFormat) {
- var newDocMap = indexRequest.sourceAsMap();
- for (var entry : inferenceFieldsMap.entrySet()) {
- XContentMapValues.insertValue(entry.getKey(), newDocMap, entry.getValue());
+ try (ReleasableBytesReference originalSource = indexRequest.sourceContext().retainedBytes()) {
+ if (useLegacyFormat) {
+ var newDocMap = indexRequest.sourceAsMap();
+ for (var entry : inferenceFieldsMap.entrySet()) {
+ XContentMapValues.insertValue(entry.getKey(), newDocMap, entry.getValue());
+ }
+ indexRequest.source(newDocMap, indexRequest.getContentType());
+ } else {
+ try (XContentBuilder builder = XContentBuilder.builder(indexRequest.getContentType().xContent())) {
+ appendSourceAndInferenceMetadata(
+ builder,
+ indexRequest.sourceContext().bytes(),
+ indexRequest.getContentType(),
+ inferenceFieldsMap
+ );
+ indexRequest.source(builder);
+ }
}
- indexRequest.source(newDocMap, indexRequest.getContentType());
- } else {
- try (XContentBuilder builder = XContentBuilder.builder(indexRequest.getContentType().xContent())) {
- appendSourceAndInferenceMetadata(
- builder,
- indexRequest.sourceContext().bytes(),
- indexRequest.getContentType(),
- inferenceFieldsMap
+ long modifiedSourceSize = indexRequest.sourceContext().bytes().length();
+
+ // Add the indexing pressure from the source modifications.
+ // Don't increment operation count because we count one source update as one operation, and we already accounted for those
+ // in addFieldInferenceRequests.
+ try {
+ coordinatingIndexingPressure.increment(0, modifiedSourceSize - originalSource.length());
+ } catch (EsRejectedExecutionException e) {
+ indexRequest.source(originalSource, indexRequest.getContentType());
+ item.abort(
+ item.index(),
+ new InferenceException(
+ "Unable to insert inference results into document ["
+ + indexRequest.id()
+ + "] due to memory pressure. Please retry the bulk request with fewer documents or smaller document sizes.",
+ e
+ )
);
- indexRequest.source(builder);
}
}
- long modifiedSourceSize = indexRequest.sourceContext().bytes().length();
-
- // Add the indexing pressure from the source modifications.
- // Don't increment operation count because we count one source update as one operation, and we already accounted for those
- // in addFieldInferenceRequests.
- try {
- coordinatingIndexingPressure.increment(0, modifiedSourceSize - originalSource.length());
- } catch (EsRejectedExecutionException e) {
- indexRequest.source(originalSource, indexRequest.getContentType());
- item.abort(
- item.index(),
- new InferenceException(
- "Unable to insert inference results into document ["
- + indexRequest.id()
- + "] due to memory pressure. Please retry the bulk request with fewer documents or smaller document sizes.",
- e
- )
- );
- }
}
}
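The try-with-resources refactor above leans on the new retainedBytes() accessor: the caller receives its own reference to the original source, so the buffer stays readable even after indexRequest.source(...) swaps the context's bytes, and only that one reference is dropped on exit. A minimal sketch under those assumptions (hypothetical helper name):

    import org.elasticsearch.action.index.IndexRequest;
    import org.elasticsearch.common.bytes.ReleasableBytesReference;

    static long sourceSizeDelta(IndexRequest indexRequest) {
        try (ReleasableBytesReference originalSource = indexRequest.sourceContext().retainedBytes()) {
            // ... rewrite the request source here; originalSource stays valid because
            // this block holds an independent reference to the old buffer
            return indexRequest.sourceContext().bytes().length() - originalSource.length();
        } // closes only this reference; the SourceContext still manages its own
    }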
From 7ad66920908ad6baf7bb6492076ce7d7260b7ec2 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Wed, 3 Sep 2025 20:42:55 -0600
Subject: [PATCH 09/20] Fixes
---
.../elasticsearch/rest/action/document/RestIndexAction.java | 4 ++--
.../action/filter/ShardBulkInferenceActionFilter.java | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
index f4405e6c912f4..5e401e81cdfee 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
@@ -132,7 +132,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
indexRequest.id(request.param("id"));
indexRequest.routing(request.param("routing"));
indexRequest.setPipeline(request.param("pipeline"));
- indexRequest.source(source, request.getXContentType());
+ indexRequest.sourceContext().source(source, request.getXContentType());
indexRequest.timeout(request.paramAsTime("timeout", IndexRequest.DEFAULT_TIMEOUT));
indexRequest.setRefreshPolicy(request.param("refresh"));
indexRequest.version(RestActions.parseVersion(request));
@@ -157,7 +157,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
indexRequest,
ActionListener.releaseAfter(
new RestToXContentListener<>(channel, DocWriteResponse::status, r -> r.getLocation(indexRequest.routing())),
- source
+ indexRequest.sourceContext()
)
);
};
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
index fc200e8dbcf79..f0be3bdabd49b 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
@@ -751,7 +751,7 @@ private void applyInferenceResponses(BulkItemRequest item, FieldInferenceRespons
try {
coordinatingIndexingPressure.increment(0, modifiedSourceSize - originalSource.length());
} catch (EsRejectedExecutionException e) {
- indexRequest.source(originalSource, indexRequest.getContentType());
+ indexRequest.sourceContext().source(originalSource.retain(), indexRequest.getContentType());
item.abort(
item.index(),
new InferenceException(
From eab166c0d78d415f3ce6e3242817e8656cbf859e Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Wed, 3 Sep 2025 21:23:37 -0600
Subject: [PATCH 10/20] Fixes
---
.../plugin/noop/action/bulk/RestNoopBulkAction.java | 1 +
.../org/elasticsearch/system/indices/SystemIndicesQA.java | 7 +++++--
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java
index 12f039a5a122f..1d696f392a624 100644
--- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java
@@ -78,6 +78,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
request.getRestApiVersion()
);
+ // TODO: Currently broken because of pooling
// short circuit the call to the transport layer
return channel -> {
BulkRestBuilderListener listener = new BulkRestBuilderListener(channel, request);
diff --git a/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java b/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java
index 46c6d1b9228d6..61907151da2c3 100644
--- a/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java
+++ b/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java
@@ -180,10 +180,13 @@ public List<Route> routes() {
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
var content = request.requiredContent();
IndexRequest indexRequest = new IndexRequest(".net-new-system-index-primary");
- indexRequest.source(content, request.getXContentType());
+ indexRequest.sourceContext().source(content.retain(), request.getXContentType());
indexRequest.id(request.param("id"));
indexRequest.setRefreshPolicy(request.param("refresh"));
- return channel -> client.index(indexRequest, ActionListener.withRef(new RestToXContentListener<>(channel), content));
+ return channel -> client.index(
+ indexRequest,
+ ActionListener.releaseAfter(new RestToXContentListener<>(channel), indexRequest.sourceContext())
+ );
}
@Override
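Both call sites in this patch converge on one lifecycle rule: the request's SourceContext, being a Releasable, is bound to the response listener, so the retained source bytes are freed exactly once whether the action succeeds or fails. A minimal sketch of the pattern (method name is illustrative):

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.index.IndexRequest;

    static <R> ActionListener<R> releasingSource(ActionListener<R> delegate, IndexRequest indexRequest) {
        // releaseAfter closes the Releasable once onResponse or onFailure has run,
        // so the SourceContext cannot leak on either path.
        return ActionListener.releaseAfter(delegate, indexRequest.sourceContext());
    }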
From 7d16bba07d5b62a91e59f6a9cb7430deeb4d43e3 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Thu, 4 Sep 2025 12:45:10 -0600
Subject: [PATCH 11/20] Change
---
.../noop/action/bulk/RestNoopBulkAction.java | 8 +-
...lureStoreMetricsWithIncrementalBulkIT.java | 26 ++--
.../action/bulk/IncrementalBulkIT.java | 130 +++++++++---------
.../metrics/NodeIndexingMetricsIT.java | 49 +++----
.../elasticsearch/action/DocWriteRequest.java | 12 +-
.../action/bulk/IncrementalBulkService.java | 20 +--
.../action/index/SourceContext.java | 12 ++
.../bytes/ReleasableBytesReference.java | 10 ++
.../rest/action/document/RestBulkAction.java | 30 ++--
.../action/document/RestBulkActionTests.java | 10 +-
10 files changed, 168 insertions(+), 139 deletions(-)
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java
index 1d696f392a624..f312075c0c05d 100644
--- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java
@@ -8,6 +8,7 @@
*/
package org.elasticsearch.plugin.noop.action.bulk;
+import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkItemResponse;
@@ -16,6 +17,7 @@
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.internal.node.NodeClient;
+import org.elasticsearch.core.Releasables;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
@@ -25,6 +27,7 @@
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.List;
import static org.elasticsearch.rest.RestRequest.Method.POST;
@@ -78,11 +81,12 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
request.getRestApiVersion()
);
- // TODO: Currently broken because of pooling
+ // The bulk request items are mutable while the bulk executes, so we must capture a copy
+ List<DocWriteRequest<?>> toClose = new ArrayList<>(bulkRequest.requests());
// short circuit the call to the transport layer
return channel -> {
BulkRestBuilderListener listener = new BulkRestBuilderListener(channel, request);
- listener.onResponse(bulkRequest);
+ ActionListener.releaseAfter(listener, () -> Releasables.close(toClose)).onResponse(bulkRequest);
};
}
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java
index 4ae28e8309007..4246ff1552349 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java
@@ -18,6 +18,7 @@
import org.elasticsearch.action.datastreams.CreateDataStreamAction;
import org.elasticsearch.action.datastreams.GetDataStreamAction;
import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.SourceContext;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
import org.elasticsearch.cluster.metadata.DataStream;
@@ -25,7 +26,6 @@
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.core.AbstractRefCounted;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.core.Strings;
import org.elasticsearch.index.Index;
@@ -89,7 +89,7 @@ public void testShortCircuitFailure() throws Exception {
String coordinatingOnlyNode = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
- AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
+ final ArrayList<SourceContext> contextsToRelease = new ArrayList<>();
IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class, coordinatingOnlyNode);
try (IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest()) {
@@ -97,8 +97,9 @@ public void testShortCircuitFailure() throws Exception {
int successfullyStored = 0;
while (nextRequested.get()) {
nextRequested.set(false);
- refCounted.incRef();
- handler.addItems(List.of(indexRequest(DATA_STREAM_NAME)), refCounted::decRef, () -> nextRequested.set(true));
+ IndexRequest indexRequest = indexRequest(DATA_STREAM_NAME);
+ contextsToRelease.add(indexRequest.sourceContext());
+ handler.addItems(List.of(indexRequest), () -> nextRequested.set(true));
successfullyStored++;
}
assertBusy(() -> assertTrue(nextRequested.get()));
@@ -116,12 +117,13 @@ public void testShortCircuitFailure() throws Exception {
while (primaryPressure.stats().getPrimaryRejections() == primaryRejections) {
while (nextRequested.get()) {
nextRequested.set(false);
- refCounted.incRef();
List<DocWriteRequest<?>> requests = new ArrayList<>();
for (int i = 0; i < 20; ++i) {
- requests.add(indexRequest(DATA_STREAM_NAME));
+ IndexRequest indexRequest = indexRequest(DATA_STREAM_NAME);
+ contextsToRelease.add(indexRequest.sourceContext());
+ requests.add(indexRequest);
}
- handler.addItems(requests, refCounted::decRef, () -> nextRequested.set(true));
+ handler.addItems(requests, () -> nextRequested.set(true));
}
assertBusy(() -> assertTrue(nextRequested.get()));
}
@@ -129,16 +131,20 @@ public void testShortCircuitFailure() throws Exception {
while (nextRequested.get()) {
nextRequested.set(false);
- refCounted.incRef();
- handler.addItems(List.of(indexRequest(DATA_STREAM_NAME)), refCounted::decRef, () -> nextRequested.set(true));
+ IndexRequest indexRequest = indexRequest(DATA_STREAM_NAME);
+ contextsToRelease.add(indexRequest.sourceContext());
+ handler.addItems(List.of(indexRequest), () -> nextRequested.set(true));
}
assertBusy(() -> assertTrue(nextRequested.get()));
PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
- handler.lastItems(List.of(indexRequest(DATA_STREAM_NAME)), refCounted::decRef, future);
+ IndexRequest lastRequest = indexRequest(DATA_STREAM_NAME);
+ contextsToRelease.add(lastRequest.sourceContext());
+ handler.lastItems(List.of(lastRequest), future);
BulkResponse bulkResponse = safeGet(future);
+ assertThat(contextsToRelease.stream().filter(c -> c.isClosed() == false).count(), equalTo(0L));
for (int i = 0; i < bulkResponse.getItems().length; ++i) {
// the first requests were successful
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/IncrementalBulkIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/IncrementalBulkIT.java
index d4d49bc4aa87a..a4ed25798e05e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/IncrementalBulkIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/IncrementalBulkIT.java
@@ -11,13 +11,13 @@
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.SourceContext;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
-import org.elasticsearch.core.AbstractRefCounted;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
@@ -80,8 +80,7 @@ public void testSingleBulkRequest() {
IndexRequest indexRequest = indexRequest(index);
PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
- AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
- handler.lastItems(List.of(indexRequest), refCounted::decRef, future);
+ handler.lastItems(List.of(indexRequest), future);
BulkResponse bulkResponse = safeGet(future);
assertNoFailures(bulkResponse);
@@ -93,7 +92,7 @@ public void testSingleBulkRequest() {
assertThat(searchResponse.getHits().getTotalHits().value(), equalTo((long) 1));
});
- assertFalse(refCounted.hasReferences());
+ assertTrue(indexRequest.sourceContext().isClosed());
}
public void testBufferedResourcesReleasedOnClose() {
@@ -107,15 +106,14 @@ public void testBufferedResourcesReleasedOnClose() {
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
IndexRequest indexRequest = indexRequest(index);
- AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
- handler.addItems(List.of(indexRequest), refCounted::decRef, () -> {});
+ handler.addItems(List.of(indexRequest), () -> {});
- assertTrue(refCounted.hasReferences());
+ assertFalse(indexRequest.sourceContext().isClosed());
assertThat(indexingPressure.stats().getCurrentCoordinatingBytes(), greaterThan(0L));
handler.close();
- assertFalse(refCounted.hasReferences());
+ assertTrue(indexRequest.sourceContext().isClosed());
assertThat(indexingPressure.stats().getCurrentCoordinatingBytes(), equalTo(0L));
}
@@ -129,20 +127,20 @@ public void testIndexingPressureRejection() {
try (Releasable r = indexingPressure.markCoordinatingOperationStarted(1, indexingPressure.stats().getMemoryLimit(), true)) {
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
- AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
+ ArrayList<SourceContext> contextsToClose = new ArrayList<>();
if (randomBoolean()) {
AtomicBoolean nextPage = new AtomicBoolean(false);
- refCounted.incRef();
- handler.addItems(List.of(indexRequest(index)), refCounted::decRef, () -> nextPage.set(true));
+ handler.addItems(List.of(indexRequest(index, contextsToClose)), () -> nextPage.set(true));
assertTrue(nextPage.get());
}
PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
- handler.lastItems(List.of(indexRequest(index)), refCounted::decRef, future);
+ handler.lastItems(List.of(indexRequest(index, contextsToClose)), future);
expectThrows(EsRejectedExecutionException.class, future::actionGet);
- assertFalse(refCounted.hasReferences());
+
+ assertThat(contextsToClose.stream().filter(c -> c.isClosed() == false).count(), equalTo(0L));
}
}
@@ -156,7 +154,7 @@ public void testIncrementalBulkLowWatermarkBackOff() throws Exception {
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
- AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
+ ArrayList<SourceContext> contextsToClose = new ArrayList<>();
AtomicBoolean nextPage = new AtomicBoolean(false);
IndexRequest indexRequest = indexRequest(index);
@@ -164,8 +162,8 @@ public void testIncrementalBulkLowWatermarkBackOff() throws Exception {
long lowWaterMarkSplits = indexingPressure.stats().getLowWaterMarkSplits();
long highWaterMarkSplits = indexingPressure.stats().getHighWaterMarkSplits();
while (total < 2048) {
- refCounted.incRef();
- handler.addItems(List.of(indexRequest), refCounted::decRef, () -> nextPage.set(true));
+ contextsToClose.add(indexRequest.sourceContext());
+ handler.addItems(List.of(indexRequest), () -> nextPage.set(true));
assertTrue(nextPage.get());
nextPage.set(false);
indexRequest = indexRequest(index);
@@ -173,19 +171,18 @@ public void testIncrementalBulkLowWatermarkBackOff() throws Exception {
}
assertThat(indexingPressure.stats().getCurrentCombinedCoordinatingAndPrimaryBytes(), greaterThan(0L));
- refCounted.incRef();
- handler.addItems(List.of(indexRequest(index)), refCounted::decRef, () -> nextPage.set(true));
+ handler.addItems(List.of(indexRequest(index, contextsToClose)), () -> nextPage.set(true));
assertBusy(() -> assertThat(indexingPressure.stats().getCurrentCombinedCoordinatingAndPrimaryBytes(), equalTo(0L)));
assertBusy(() -> assertThat(indexingPressure.stats().getLowWaterMarkSplits(), equalTo(lowWaterMarkSplits + 1)));
assertThat(indexingPressure.stats().getHighWaterMarkSplits(), equalTo(highWaterMarkSplits));
PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
- handler.lastItems(List.of(indexRequest), refCounted::decRef, future);
+ handler.lastItems(List.of(indexRequest(index, contextsToClose)), future);
BulkResponse bulkResponse = safeGet(future);
assertNoFailures(bulkResponse);
- assertFalse(refCounted.hasReferences());
+ assertThat(contextsToClose.stream().filter(c -> c.isClosed() == false).count(), equalTo(0L));
}
public void testIncrementalBulkHighWatermarkBackOff() throws Exception {
@@ -199,28 +196,26 @@ public void testIncrementalBulkHighWatermarkBackOff() throws Exception {
long lowWaterMarkSplits = indexingPressure.stats().getLowWaterMarkSplits();
long highWaterMarkSplits = indexingPressure.stats().getHighWaterMarkSplits();
- AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
+ ArrayList<SourceContext> contextsToClose = new ArrayList<>();
AtomicBoolean nextPage = new AtomicBoolean(false);
ArrayList<IncrementalBulkService.Handler> handlers = new ArrayList<>();
for (int i = 0; i < 4; ++i) {
ArrayList<DocWriteRequest<?>> requests = new ArrayList<>();
- add512BRequests(requests, index);
+ add512BRequests(requests, contextsToClose, index);
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
handlers.add(handler);
- refCounted.incRef();
- handler.addItems(requests, refCounted::decRef, () -> nextPage.set(true));
+ handler.addItems(requests, () -> nextPage.set(true));
assertTrue(nextPage.get());
nextPage.set(false);
}
// Test that a request smaller than SPLIT_BULK_HIGH_WATERMARK_SIZE (1KB) is not throttled
ArrayList<DocWriteRequest<?>> requestsNoThrottle = new ArrayList<>();
- add512BRequests(requestsNoThrottle, index);
+ add512BRequests(requestsNoThrottle, contextsToClose, index);
IncrementalBulkService.Handler handlerNoThrottle = incrementalBulkService.newBulkRequest();
handlers.add(handlerNoThrottle);
- refCounted.incRef();
- handlerNoThrottle.addItems(requestsNoThrottle, refCounted::decRef, () -> nextPage.set(true));
+ handlerNoThrottle.addItems(requestsNoThrottle, () -> nextPage.set(true));
assertTrue(nextPage.get());
nextPage.set(false);
assertThat(indexingPressure.stats().getHighWaterMarkSplits(), equalTo(highWaterMarkSplits));
@@ -228,14 +223,13 @@ public void testIncrementalBulkHighWatermarkBackOff() throws Exception {
ArrayList<DocWriteRequest<?>> requestsThrottle = new ArrayList<>();
// Test that a request larger than SPLIT_BULK_HIGH_WATERMARK_SIZE (1KB) is throttled
- add512BRequests(requestsThrottle, index);
- add512BRequests(requestsThrottle, index);
+ add512BRequests(requestsThrottle, contextsToClose, index);
+ add512BRequests(requestsThrottle, contextsToClose, index);
CountDownLatch finishLatch = new CountDownLatch(1);
blockWriteCoordinationPool(threadPool, finishLatch);
IncrementalBulkService.Handler handlerThrottled = incrementalBulkService.newBulkRequest();
- refCounted.incRef();
- handlerThrottled.addItems(requestsThrottle, refCounted::decRef, () -> nextPage.set(true));
+ handlerThrottled.addItems(requestsThrottle, () -> nextPage.set(true));
assertFalse(nextPage.get());
finishLatch.countDown();
@@ -247,16 +241,14 @@ public void testIncrementalBulkHighWatermarkBackOff() throws Exception {
assertThat(indexingPressure.stats().getLowWaterMarkSplits(), equalTo(lowWaterMarkSplits));
for (IncrementalBulkService.Handler h : handlers) {
- refCounted.incRef();
PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
- h.lastItems(List.of(indexRequest(index)), refCounted::decRef, future);
+ h.lastItems(List.of(indexRequest(index, contextsToClose)), future);
BulkResponse bulkResponse = safeGet(future);
assertNoFailures(bulkResponse);
}
assertBusy(() -> assertThat(indexingPressure.stats().getCurrentCombinedCoordinatingAndPrimaryBytes(), equalTo(0L)));
- refCounted.decRef();
- assertFalse(refCounted.hasReferences());
+ assertThat(contextsToClose.stream().filter(c -> c.isClosed() == false).count(), equalTo(0L));
}
public void testMultipleBulkPartsWithBackoff() {
@@ -306,10 +298,10 @@ public void testGlobalBulkFailure() throws InterruptedException {
);
} else {
PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
- AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
- handler.lastItems(List.of(indexRequest(index)), refCounted::decRef, future);
- assertFalse(refCounted.hasReferences());
+ IndexRequest indexRequest = indexRequest(index);
+ handler.lastItems(List.of(indexRequest), future);
expectThrows(EsRejectedExecutionException.class, future::actionGet);
+ assertTrue(indexRequest.sourceContext().isClosed());
}
}
}
@@ -325,7 +317,7 @@ public void testBulkLevelBulkFailureAfterFirstIncrementalRequest() throws Except
IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class, randomNodeName);
ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, randomNodeName);
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
- AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
+ ArrayList<SourceContext> contextsToClose = new ArrayList<>();
PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
CountDownLatch blockingLatch1 = new CountDownLatch(1);
@@ -336,8 +328,7 @@ public void testBulkLevelBulkFailureAfterFirstIncrementalRequest() throws Except
blockWriteCoordinationPool(threadPool, blockingLatch1);
while (nextRequested.get()) {
nextRequested.set(false);
- refCounted.incRef();
- handler.addItems(List.of(indexRequest(index)), refCounted::decRef, () -> nextRequested.set(true));
+ handler.addItems(List.of(indexRequest(index, contextsToClose)), () -> nextRequested.set(true));
hits.incrementAndGet();
}
} finally {
@@ -351,7 +342,7 @@ public void testBulkLevelBulkFailureAfterFirstIncrementalRequest() throws Except
blockWriteCoordinationPool(threadPool, blockingLatch2);
fillWriteCoordinationQueue(threadPool);
- handler.lastItems(List.of(indexRequest(index)), refCounted::decRef, future);
+ handler.lastItems(List.of(indexRequest(index, contextsToClose)), future);
} finally {
blockingLatch2.countDown();
}
@@ -379,7 +370,7 @@ public void testShortCircuitShardLevelFailure() throws Exception {
String coordinatingOnlyNode = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
- AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
+ ArrayList<SourceContext> contextsToClose = new ArrayList<>();
IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class, coordinatingOnlyNode);
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
@@ -387,8 +378,7 @@ public void testShortCircuitShardLevelFailure() throws Exception {
AtomicLong hits = new AtomicLong(0);
while (nextRequested.get()) {
nextRequested.set(false);
- refCounted.incRef();
- handler.addItems(List.of(indexRequest(index)), refCounted::decRef, () -> nextRequested.set(true));
+ handler.addItems(List.of(indexRequest(index, contextsToClose)), () -> nextRequested.set(true));
hits.incrementAndGet();
}
@@ -403,12 +393,11 @@ public void testShortCircuitShardLevelFailure() throws Exception {
while (primaryPressure.stats().getPrimaryRejections() == primaryRejections) {
while (nextRequested.get()) {
nextRequested.set(false);
- refCounted.incRef();
List<DocWriteRequest<?>> requests = new ArrayList<>();
for (int i = 0; i < 20; ++i) {
- requests.add(indexRequest(index));
+ requests.add(indexRequest(index, contextsToClose));
}
- handler.addItems(requests, refCounted::decRef, () -> nextRequested.set(true));
+ handler.addItems(requests, () -> nextRequested.set(true));
}
assertBusy(() -> assertTrue(nextRequested.get()));
}
@@ -416,14 +405,13 @@ public void testShortCircuitShardLevelFailure() throws Exception {
while (nextRequested.get()) {
nextRequested.set(false);
- refCounted.incRef();
- handler.addItems(List.of(indexRequest(index)), refCounted::decRef, () -> nextRequested.set(true));
+ handler.addItems(List.of(indexRequest(index, contextsToClose)), () -> nextRequested.set(true));
}
assertBusy(() -> assertTrue(nextRequested.get()));
PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
- handler.lastItems(List.of(indexRequest(index)), refCounted::decRef, future);
+ handler.lastItems(List.of(indexRequest(index, contextsToClose)), future);
BulkResponse bulkResponse = safeGet(future);
assertTrue(bulkResponse.hasFailures());
@@ -441,6 +429,8 @@ public void testShortCircuitShardLevelFailure() throws Exception {
assertThat(item.getFailure().getCause().getCause(), instanceOf(EsRejectedExecutionException.class));
}
}
+
+ assertThat(contextsToClose.stream().filter(c -> c.isClosed() == false).count(), equalTo(0L));
}
public void testShortCircuitShardLevelFailureWithIngestNodeHop() throws Exception {
@@ -478,7 +468,7 @@ public void testShortCircuitShardLevelFailureWithIngestNodeHop() throws Exceptio
// a node with the ingest role.
String coordinatingOnlyNode = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
- AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
+ ArrayList<SourceContext> contextsToClose = new ArrayList<>();
IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class, coordinatingOnlyNode);
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
@@ -486,8 +476,7 @@ public void testShortCircuitShardLevelFailureWithIngestNodeHop() throws Exceptio
AtomicLong hits = new AtomicLong(0);
while (nextRequested.get()) {
nextRequested.set(false);
- refCounted.incRef();
- handler.addItems(List.of(indexRequest(index).setPipeline(pipelineId)), refCounted::decRef, () -> nextRequested.set(true));
+ handler.addItems(List.of(indexRequest(index, contextsToClose).setPipeline(pipelineId)), () -> nextRequested.set(true));
hits.incrementAndGet();
}
@@ -500,8 +489,7 @@ public void testShortCircuitShardLevelFailureWithIngestNodeHop() throws Exceptio
try (Releasable releasable = primaryPressure.validateAndMarkPrimaryOperationStarted(10, memoryLimit, 0, false, false)) {
while (nextRequested.get()) {
nextRequested.set(false);
- refCounted.incRef();
- handler.addItems(List.of(indexRequest(index).setPipeline(pipelineId)), refCounted::decRef, () -> nextRequested.set(true));
+ handler.addItems(List.of(indexRequest(index, contextsToClose).setPipeline(pipelineId)), () -> nextRequested.set(true));
}
assertBusy(() -> assertTrue(nextRequested.get()));
@@ -509,14 +497,13 @@ public void testShortCircuitShardLevelFailureWithIngestNodeHop() throws Exceptio
while (nextRequested.get()) {
nextRequested.set(false);
- refCounted.incRef();
- handler.addItems(List.of(indexRequest(index).setPipeline(pipelineId)), refCounted::decRef, () -> nextRequested.set(true));
+ handler.addItems(List.of(indexRequest(index, contextsToClose).setPipeline(pipelineId)), () -> nextRequested.set(true));
}
assertBusy(() -> assertTrue(nextRequested.get()));
PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
- handler.lastItems(List.of(indexRequest(index)), refCounted::decRef, future);
+ handler.lastItems(List.of(indexRequest(index, contextsToClose)), future);
BulkResponse bulkResponse = safeGet(future);
assertTrue(bulkResponse.hasFailures());
@@ -529,6 +516,8 @@ public void testShortCircuitShardLevelFailureWithIngestNodeHop() throws Exceptio
assertTrue(item.isFailed());
assertThat(item.getFailure().getCause().getCause(), instanceOf(EsRejectedExecutionException.class));
}
+
+ assertThat(contextsToClose.stream().filter(c -> c.isClosed() == false).count(), equalTo(0L));
}
private static void blockWriteCoordinationPool(ThreadPool threadPool, CountDownLatch finishLatch) {
@@ -583,13 +572,13 @@ public boolean isForceExecution() {
}
private BulkResponse executeBulk(long docs, String index, IncrementalBulkService.Handler handler, ExecutorService executorService) {
- ConcurrentLinkedQueue<DocWriteRequest<?>> queue = new ConcurrentLinkedQueue<>();
+ List<SourceContext> contextsToClose = new ArrayList<>();
+ ConcurrentLinkedQueue<IndexRequest> queue = new ConcurrentLinkedQueue<>();
for (int i = 0; i < docs; i++) {
- IndexRequest indexRequest = indexRequest(index);
+ IndexRequest indexRequest = indexRequest(index, contextsToClose);
queue.add(indexRequest);
}
- AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
Runnable r = new Runnable() {
@@ -602,10 +591,9 @@ public void run() {
}
if (queue.isEmpty()) {
- handler.lastItems(docs, refCounted::decRef, future);
+ handler.lastItems(docs, future);
} else {
- refCounted.incRef();
- handler.addItems(docs, refCounted::decRef, () -> executorService.execute(this));
+ handler.addItems(docs, () -> executorService.execute(this));
}
}
};
@@ -613,20 +601,26 @@ public void run() {
executorService.execute(r);
BulkResponse bulkResponse = future.actionGet();
- assertFalse(refCounted.hasReferences());
+ assertThat(contextsToClose.stream().filter(c -> c.isClosed() == false).count(), equalTo(0L));
return bulkResponse;
}
- private static void add512BRequests(ArrayList<DocWriteRequest<?>> requests, String index) {
+ private static void add512BRequests(ArrayList<DocWriteRequest<?>> requests, ArrayList<SourceContext> contextsToClose, String index) {
long total = 0;
while (total < 512) {
- IndexRequest indexRequest = indexRequest(index);
+ IndexRequest indexRequest = indexRequest(index, contextsToClose);
requests.add(indexRequest);
total += indexRequest.ramBytesUsed();
}
assertThat(total, lessThan(1024L));
}
+ private static IndexRequest indexRequest(String index, List<SourceContext> contextsToClose) {
+ IndexRequest indexRequest = indexRequest(index);
+ contextsToClose.add(indexRequest.sourceContext());
+ return indexRequest;
+ }
+
private static IndexRequest indexRequest(String index) {
IndexRequest indexRequest = new IndexRequest();
indexRequest.index(index);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java
index 290f299df5a4c..fde1124250c94 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java
@@ -18,6 +18,7 @@
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.SourceContext;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.cluster.metadata.IndexMetadata;
@@ -25,7 +26,6 @@
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
-import org.elasticsearch.core.AbstractRefCounted;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.IndexingPressure;
import org.elasticsearch.index.VersionType;
@@ -704,14 +704,14 @@ public void testIncrementalBulkLowWatermarkSplitMetrics() throws Exception {
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
- AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
+ ArrayList<SourceContext> contextsToClose = new ArrayList<>();
AtomicBoolean nextPage = new AtomicBoolean(false);
IndexRequest indexRequest = indexRequest(index);
long total = indexRequest.ramBytesUsed();
while (total < 2048) {
- refCounted.incRef();
- handler.addItems(List.of(indexRequest), refCounted::decRef, () -> nextPage.set(true));
+ contextsToClose.add(indexRequest.sourceContext());
+ handler.addItems(List.of(indexRequest), () -> nextPage.set(true));
assertTrue(nextPage.get());
nextPage.set(false);
indexRequest = indexRequest(index);
@@ -737,8 +737,9 @@ public void testIncrementalBulkLowWatermarkSplitMetrics() throws Exception {
equalTo(0L)
);
- refCounted.incRef();
- handler.addItems(List.of(indexRequest(index)), refCounted::decRef, () -> nextPage.set(true));
+ indexRequest = indexRequest(index);
+ contextsToClose.add(indexRequest.sourceContext());
+ handler.addItems(List.of(indexRequest), () -> nextPage.set(true));
assertBusy(() -> assertThat(indexingPressure.stats().getCurrentCombinedCoordinatingAndPrimaryBytes(), equalTo(0L)));
assertBusy(() -> assertThat(indexingPressure.stats().getLowWaterMarkSplits(), equalTo(1L)));
@@ -761,11 +762,13 @@ public void testIncrementalBulkLowWatermarkSplitMetrics() throws Exception {
);
PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
- handler.lastItems(List.of(indexRequest), refCounted::decRef, future);
+ IndexRequest lastRequest = indexRequest(index);
+ contextsToClose.add(lastRequest.sourceContext());
+ handler.lastItems(List.of(lastRequest), future);
BulkResponse bulkResponse = safeGet(future);
assertNoFailures(bulkResponse);
- assertFalse(refCounted.hasReferences());
+ assertThat(contextsToClose.stream().filter(c -> c.isClosed() == false).count(), equalTo(0L));
}
// Borrowed this test from IncrementalBulkIT and added test for metrics to it
@@ -792,28 +795,26 @@ public void testIncrementalBulkHighWatermarkSplitMetrics() throws Exception {
.orElseThrow();
testTelemetryPlugin.resetMeter();
- AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
+ ArrayList<SourceContext> contextsToClose = new ArrayList<>();
AtomicBoolean nextPage = new AtomicBoolean(false);
ArrayList<IncrementalBulkService.Handler> handlers = new ArrayList<>();
for (int i = 0; i < 4; ++i) {
ArrayList<DocWriteRequest<?>> requests = new ArrayList<>();
- add512BRequests(requests, index);
+ add512BRequests(requests, contextsToClose, index);
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
handlers.add(handler);
- refCounted.incRef();
- handler.addItems(requests, refCounted::decRef, () -> nextPage.set(true));
+ handler.addItems(requests, () -> nextPage.set(true));
assertTrue(nextPage.get());
nextPage.set(false);
}
// Test that a request smaller than SPLIT_BULK_HIGH_WATERMARK_SIZE (1KB) is not throttled
ArrayList<DocWriteRequest<?>> requestsNoThrottle = new ArrayList<>();
- add512BRequests(requestsNoThrottle, index);
+ add512BRequests(requestsNoThrottle, contextsToClose, index);
IncrementalBulkService.Handler handlerNoThrottle = incrementalBulkService.newBulkRequest();
handlers.add(handlerNoThrottle);
- refCounted.incRef();
- handlerNoThrottle.addItems(requestsNoThrottle, refCounted::decRef, () -> nextPage.set(true));
+ handlerNoThrottle.addItems(requestsNoThrottle, () -> nextPage.set(true));
assertTrue(nextPage.get());
nextPage.set(false);
assertThat(indexingPressure.stats().getHighWaterMarkSplits(), equalTo(0L));
@@ -836,14 +837,13 @@ public void testIncrementalBulkHighWatermarkSplitMetrics() throws Exception {
ArrayList<DocWriteRequest<?>> requestsThrottle = new ArrayList<>();
// Test that a request larger than SPLIT_BULK_HIGH_WATERMARK_SIZE (1KB) is throttled
- add512BRequests(requestsThrottle, index);
- add512BRequests(requestsThrottle, index);
+ add512BRequests(requestsThrottle, contextsToClose, index);
+ add512BRequests(requestsThrottle, contextsToClose, index);
CountDownLatch finishLatch = new CountDownLatch(1);
blockWriteCoordinationPool(threadPool, finishLatch);
IncrementalBulkService.Handler handlerThrottled = incrementalBulkService.newBulkRequest();
- refCounted.incRef();
- handlerThrottled.addItems(requestsThrottle, refCounted::decRef, () -> nextPage.set(true));
+ handlerThrottled.addItems(requestsThrottle, () -> nextPage.set(true));
assertFalse(nextPage.get());
finishLatch.countDown();
@@ -871,16 +871,16 @@ public void testIncrementalBulkHighWatermarkSplitMetrics() throws Exception {
);
for (IncrementalBulkService.Handler h : handlers) {
- refCounted.incRef();
PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
- h.lastItems(List.of(indexRequest(index)), refCounted::decRef, future);
+ IndexRequest indexRequest = indexRequest(index);
+ contextsToClose.add(indexRequest.sourceContext());
+ h.lastItems(List.of(indexRequest), future);
BulkResponse bulkResponse = safeGet(future);
assertNoFailures(bulkResponse);
}
assertBusy(() -> assertThat(indexingPressure.stats().getCurrentCombinedCoordinatingAndPrimaryBytes(), equalTo(0L)));
- refCounted.decRef();
- assertFalse(refCounted.hasReferences());
+ assertThat(contextsToClose.stream().filter(c -> c.isClosed() == false).count(), equalTo(0L));
testTelemetryPlugin.collect();
}
@@ -909,10 +909,11 @@ private static IndexRequest indexRequest(String index) {
return indexRequest;
}
- private static void add512BRequests(ArrayList<DocWriteRequest<?>> requests, String index) {
+ private static void add512BRequests(ArrayList<DocWriteRequest<?>> requests, ArrayList<SourceContext> contextsToClose, String index) {
long total = 0;
while (total < 512) {
IndexRequest indexRequest = indexRequest(index);
+ contextsToClose.add(indexRequest.sourceContext());
requests.add(indexRequest);
total += indexRequest.ramBytesUsed();
}
diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java
index b398da27a8464..47986d44521ed 100644
--- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java
@@ -9,6 +9,7 @@
package org.elasticsearch.action;
import org.apache.lucene.util.Accountable;
+import org.elasticsearch.action.bulk.TransportAbstractBulkAction;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.IndicesOptions;
@@ -20,6 +21,7 @@
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.Releasable;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.shard.ShardId;
@@ -37,7 +39,7 @@
* Generic interface to group ActionRequest, which perform writes to a single document
* Action requests implementing this can be part of {@link org.elasticsearch.action.bulk.BulkRequest}
*/
-public interface DocWriteRequest<T> extends IndicesRequest, Accountable {
+public interface DocWriteRequest<T> extends IndicesRequest, Accountable, Releasable {
// Flag set for disallowing index auto creation for an individual write request.
String REQUIRE_ALIAS = "require_alias";
@@ -346,4 +348,12 @@ static ActionRequestValidationException validateDocIdLength(String id, ActionReq
}
return validationException;
}
+
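+ /**
+ * Releases any source bytes retained by this request. The default implementation closes the
+ * {@link IndexRequest} source context when this request is, or wraps, an index request; for
+ * request types that carry no index source this is a no-op.
+ */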
+ @Override
+ default void close() {
+ IndexRequest indexRequest = TransportAbstractBulkAction.getIndexWriteRequest(this);
+ if (indexRequest != null) {
+ indexRequest.sourceContext().close();
+ }
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/IncrementalBulkService.java b/server/src/main/java/org/elasticsearch/action/bulk/IncrementalBulkService.java
index b721a8f4f2b6b..f97920ea38a15 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/IncrementalBulkService.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/IncrementalBulkService.java
@@ -154,15 +154,15 @@ public void updateWaitForChunkMetrics(long chunkWaitTimeInMillis) {
chunkWaitTimeMillisHistogram.record(chunkWaitTimeInMillis);
}
- public void addItems(List<DocWriteRequest<?>> items, Releasable releasable, Runnable nextItems) {
+ public void addItems(List<DocWriteRequest<?>> items, Runnable nextItems) {
assert closed == false;
assert bulkInProgress == false;
if (bulkActionLevelFailure != null) {
- shortCircuitDueToTopLevelFailure(items, releasable);
+ shortCircuitDueToTopLevelFailure(items);
nextItems.run();
} else {
assert bulkRequest != null;
- if (internalAddItems(items, releasable)) {
+ if (internalAddItems(items)) {
Optional<Releasable> maybeSplit = incrementalOperation.maybeSplit();
if (maybeSplit.isPresent()) {
Releasable coordinating = maybeSplit.get();
@@ -200,14 +200,14 @@ public void onFailure(Exception e) {
}
}
- public void lastItems(List<DocWriteRequest<?>> items, Releasable releasable, ActionListener<BulkResponse> listener) {
+ public void lastItems(List<DocWriteRequest<?>> items, ActionListener<BulkResponse> listener) {
assert bulkInProgress == false;
if (bulkActionLevelFailure != null) {
- shortCircuitDueToTopLevelFailure(items, releasable);
+ shortCircuitDueToTopLevelFailure(items);
errorResponse(listener);
} else {
assert bulkRequest != null;
- if (internalAddItems(items, releasable)) {
+ if (internalAddItems(items)) {
Releasable coordinating = incrementalOperation.split();
final ArrayList<Releasable> toRelease = new ArrayList<>(releasables);
releasables.clear();
@@ -248,14 +248,14 @@ public void close() {
}
}
- private void shortCircuitDueToTopLevelFailure(List<DocWriteRequest<?>> items, Releasable releasable) {
+ private void shortCircuitDueToTopLevelFailure(List<DocWriteRequest<?>> items) {
assert releasables.isEmpty();
assert incrementalOperation.currentOperationsSize() == 0;
assert bulkRequest == null;
if (globalFailure == false) {
addItemLevelFailures(items);
}
- Releasables.close(releasable);
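+ // Each DocWriteRequest is itself Releasable now; closing the items releases any retained source bytes.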
+ Releasables.close(items);
}
private void errorResponse(ActionListener<BulkResponse> listener) {
@@ -290,10 +290,10 @@ private void addItemLevelFailures(List<DocWriteRequest<?>> items) {
responses.add(new BulkResponse(bulkItemResponses, 0, 0));
}
- private boolean internalAddItems(List<DocWriteRequest<?>> items, Releasable releasable) {
+ private boolean internalAddItems(List<DocWriteRequest<?>> items) {
try {
bulkRequest.add(items);
- releasables.add(releasable);
+ releasables.addAll(items);
long size = items.stream().mapToLong(Accountable::ramBytesUsed).sum();
incrementalOperation.increment(items.size(), size);
return true;
diff --git a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
index 619ff559d6808..e3dd1afff93bb 100644
--- a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
+++ b/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
@@ -52,6 +52,7 @@ public SourceContext(StreamInput in) throws IOException {
@Override
public void writeTo(StreamOutput out) throws IOException {
+ assert isClosed == false;
if (contentType != null) {
out.writeBoolean(true);
XContentHelper.writeTo(out, contentType);
@@ -62,25 +63,34 @@ public void writeTo(StreamOutput out) throws IOException {
}
public XContentType contentType() {
+ assert isClosed == false;
return contentType;
}
public BytesReference bytes() {
+ assert isClosed == false;
return source;
}
public ReleasableBytesReference retainedBytes() {
+ assert isClosed == false;
return source.retain();
}
public boolean hasSource() {
+ assert isClosed == false;
return source != null;
}
public int byteLength() {
+ assert isClosed == false;
return source == null ? 0 : source.length();
}
+ public boolean isClosed() {
+ return isClosed;
+ }
+
@Override
public void close() {
assert isClosed == false;
@@ -91,6 +101,7 @@ public void close() {
}
public Map<String, Object> sourceAsMap() {
+ assert isClosed == false;
return XContentHelper.convertToMap(source, false, contentType).v2();
}
@@ -239,6 +250,7 @@ private void setSource(BytesReference source, XContentType contentType) {
}
private void setSource(ReleasableBytesReference source, XContentType contentType) {
+ assert isClosed == false;
Releasable toClose = this.source;
this.source = source;
this.contentType = contentType;
diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java
index ff8e68d462829..b7d567a89d3dd 100644
--- a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java
+++ b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java
@@ -84,6 +84,16 @@ public ReleasableBytesReference retain() {
return this;
}
+ /**
+ * Similar to {@link #retain} except that it retains the current instance and then returns a new instance with its own dedicated
+ * ref-count. This is primarily useful when splitting off a reference for a different purpose, where a separate ref-count reduces
+ * contention and makes the counts easier to reason about.
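+ * <p>A minimal usage sketch (with a hypothetical caller, not code from this change):
+ * <pre>{@code
+ * ReleasableBytesReference child = parent.retainChild(); // bumps the parent's ref-count once
+ * child.retain();  // counted against the child only; the parent count is untouched
+ * child.decRef();
+ * child.decRef();  // last child ref released -> the single parent reference is released
+ * }</pre>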
+ */
+ public ReleasableBytesReference retainChild() {
+ refCounted.incRef();
+ return new ReleasableBytesReference(delegate, refCounted::decRef);
+ }
+
/**
* Same as {@link #slice} except that the slice is not guaranteed to share the same underlying reference count as this instance.
* This method is equivalent to calling {@code .slice(from, length).retain()} but might be more efficient through the avoidance of
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
index 4a10c120ff796..76d3c62e1c859 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
@@ -16,8 +16,6 @@
import org.elasticsearch.action.bulk.BulkRequestParser;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.bulk.IncrementalBulkService;
-import org.elasticsearch.action.bulk.TransportBulkAction;
-import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.bytes.CompositeBytesReference;
@@ -129,15 +127,15 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
request.getRestApiVersion()
);
} catch (Exception e) {
- closeSourceContexts(bulkRequest.requests());
+ Releasables.close(bulkRequest.requests());
return channel -> new RestToXContentListener<>(channel).onFailure(parseFailureException(e));
}
- // The request list is actually mutable so we make a copy for close.
- List<DocWriteRequest<?>> toClose = new ArrayList<>(bulkRequest.requests());
+ // The actual bulk request items are mutable during the bulk process, so we must create a copy for close
+ List<DocWriteRequest<?>> toClose = new ArrayList<>(bulkRequest.requests());
return channel -> client.bulk(
bulkRequest,
- ActionListener.releaseAfter(new RestRefCountedChunkedToXContentListener<>(channel), () -> closeSourceContexts(toClose))
+ ActionListener.releaseAfter(new RestRefCountedChunkedToXContentListener<>(channel), () -> Releasables.close(toClose))
);
} else {
request.ensureContent();
@@ -237,7 +235,10 @@ public void handleChunk(RestChannel channel, ReleasableBytesReference chunk, boo
}
data = new ReleasableBytesReference(CompositeBytesReference.of(components), () -> Releasables.close(components));
} else {
- data = chunk.retain();
+ // We create a dedicated ref count for the application layer here. Otherwise, the leak tracking infrastructure
+ // would be hit every time the source of a single doc is released. We still have leak tracking since the child
+ // references the parent; there is just a single tracked reference instead of one for every doc.
+ data = chunk.retainChild();
}
try (ReleasableBytesReference toClose = data) {
@@ -262,14 +263,14 @@ public void handleChunk(RestChannel channel, ReleasableBytesReference chunk, boo
assert channel != null;
ArrayList<DocWriteRequest<?>> toPass = new ArrayList<>(items);
items.clear();
- handler.lastItems(toPass, () -> closeSourceContexts(toPass), new RestRefCountedChunkedToXContentListener<>(channel));
+ handler.lastItems(toPass, new RestRefCountedChunkedToXContentListener<>(channel));
}
handler.updateWaitForChunkMetrics(TimeUnit.NANOSECONDS.toMillis(totalChunkWaitTimeInNanos));
totalChunkWaitTimeInNanos = 0L;
} else if (items.isEmpty() == false) {
ArrayList<DocWriteRequest<?>> toPass = new ArrayList<>(items);
items.clear();
- handler.addItems(toPass, () -> closeSourceContexts(toPass), () -> {
+ handler.addItems(toPass, () -> {
requestNextChunkTime = System.nanoTime();
request.contentStream().next();
});
@@ -291,7 +292,7 @@ private void shortCircuit() {
shortCircuited = true;
Releasables.close(handler);
Releasables.close(unParsedChunks);
- closeSourceContexts(items);
+ Releasables.close(items);
items.clear();
unParsedChunks.clear();
}
@@ -311,15 +312,6 @@ private void releaseConsumeBytes(int bytesConsumed) {
}
- private static void closeSourceContexts(List<DocWriteRequest<?>> requests) {
- for (DocWriteRequest<?> request : requests) {
- IndexRequest indexRequest = TransportBulkAction.getIndexWriteRequest(request);
- if (indexRequest != null) {
- indexRequest.sourceContext().close();
- }
- }
- }
-
@Override
public boolean supportsBulkContent() {
return true;
diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java
index 6a1adbea5e7b6..2180cf5f85f7a 100644
--- a/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java
+++ b/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java
@@ -24,7 +24,7 @@
import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.core.Releasable;
+import org.elasticsearch.core.Releasables;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.IndexingPressure;
import org.elasticsearch.rest.RestChannel;
@@ -252,15 +252,15 @@ public void next() {
) {
@Override
- public void addItems(List<DocWriteRequest<?>> items, Releasable releasable, Runnable nextItems) {
- releasable.close();
+ public void addItems(List<DocWriteRequest<?>> items, Runnable nextItems) {
docs.addAll(items);
+ Releasables.close(items);
}
@Override
- public void lastItems(List<DocWriteRequest<?>> items, Releasable releasable, ActionListener<BulkResponse> listener) {
- releasable.close();
+ public void lastItems(List<DocWriteRequest<?>> items, ActionListener<BulkResponse> listener) {
docs.addAll(items);
+ Releasables.close(items);
isLast.set(true);
}
};
From 3396c647e07cfaaae2f4247073f7fcf5cbb47a7f Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Thu, 4 Sep 2025 16:39:49 -0600
Subject: [PATCH 12/20] Change
---
.../action/bulk/BulkSourceReleaseIT.java | 246 ++++++++++++++++++
.../action/document/RestBulkActionIT.java | 1 +
.../action/bulk/TransportShardBulkAction.java | 6 +-
.../bulk/TransportSimulateBulkAction.java | 4 +-
.../action/update/TransportUpdateAction.java | 4 +-
.../action/update/UpdateRequest.java | 4 +-
.../cluster/metadata/DataStream.java | 2 +-
.../action/bulk/BulkRequestParserTests.java | 16 +-
.../action/MonitoringBulkRequest.java | 2 +-
.../ShardBulkInferenceActionFilter.java | 23 +-
.../actions/index/ExecutableIndexAction.java | 2 +-
.../xpack/watcher/history/HistoryStore.java | 2 +-
12 files changed, 284 insertions(+), 28 deletions(-)
create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java
new file mode 100644
index 0000000000000..b47b4a1d15bac
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java
@@ -0,0 +1,246 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.ingest.IngestClientIT;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.LockSupport;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
+
+public class BulkSourceReleaseIT extends ESIntegTestCase {
+
+ @Override
+ protected Collection<Class<? extends Plugin>> nodePlugins() {
+ return List.of(IngestClientIT.ExtendedIngestTestPlugin.class);
+ }
+
+ public void testBulkSourceReleaseWhenIngestReplacesSource() throws Exception {
+ String index = "test1";
+ createIndex(index);
+
+ String pipelineId = "pipeline_id";
+ putPipeline(pipelineId);
+
+ IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class);
+
+ ReleasableBytesReference originalBytes = new ReleasableBytesReference(new BytesArray("{\"field\": \"value\"}"), () -> {});
+
+ IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
+ IndexRequest indexRequest = new IndexRequest();
+ indexRequest.index(index);
+ indexRequest.sourceContext().source(originalBytes, XContentType.JSON);
+ indexRequest.setPipeline(pipelineId);
+
+ CountDownLatch blockLatch = new CountDownLatch(1);
+ blockWritePool(internalCluster().getInstance(ThreadPool.class), blockLatch);
+
+ PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
+
+ try {
+ handler.lastItems(List.of(indexRequest), future);
+ assertBusy(() -> assertFalse(originalBytes.hasReferences()));
+ } finally {
+ blockLatch.countDown();
+ }
+
+ BulkResponse bulkResponse = safeGet(future);
+ assertNoFailures(bulkResponse);
+ }
+
+ public void testBytesReferencedByTwoSourcesNotReleasedIfOnlyOneIngestPipeline() throws Exception {
+ String index = "test1";
+ createIndex(index);
+
+ String pipelineId = "pipeline_id";
+ putPipeline(pipelineId);
+
+ IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class);
+
+ ReleasableBytesReference originalBytes = new ReleasableBytesReference(
+ new BytesArray("{\"field\": \"value1\"}{\"field\": \"value2\"}"),
+ () -> {}
+ );
+ int splitPoint = originalBytes.indexOf((byte) '}', 0) + 1;
+
+ IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
+ IndexRequest indexRequest = new IndexRequest();
+ indexRequest.index(index);
+ indexRequest.sourceContext().source(originalBytes.retainedSlice(0, splitPoint), XContentType.JSON);
+ indexRequest.setPipeline(pipelineId);
+
+ IndexRequest indexRequestNoIngest = new IndexRequest();
+ indexRequestNoIngest.index(index);
+ indexRequestNoIngest.sourceContext()
+ .source(originalBytes.retainedSlice(splitPoint, originalBytes.length() - splitPoint), XContentType.JSON);
+
+ originalBytes.decRef();
+ assertTrue(originalBytes.hasReferences());
+
+ CountDownLatch blockLatch = new CountDownLatch(1);
+ blockWritePool(internalCluster().getInstance(ThreadPool.class), blockLatch);
+
+ PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
+ try {
+ handler.lastItems(List.of(indexRequest, indexRequestNoIngest), future);
+
+ // Pause briefly to allow time for any erroneous release of the bytes after ingest processing
+ LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(50));
+
+ assertTrue(originalBytes.hasReferences());
+ } finally {
+ blockLatch.countDown();
+ }
+
+ BulkResponse bulkResponse = safeGet(future);
+ assertNoFailures(bulkResponse);
+ }
+
+ public void testSomeReferencesCanBeReleasedWhileOthersRetained() throws Exception {
+ String index = "test1";
+ createIndex(index);
+
+ String pipelineId = "pipeline_id";
+ putPipeline(pipelineId);
+
+ IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class);
+
+ ReleasableBytesReference releasedBytes = new ReleasableBytesReference(new BytesArray("{\"field\": \"value1\"}"), () -> {});
+ ReleasableBytesReference retainedBytes = new ReleasableBytesReference(
+ new BytesArray("{\"field\": \"value2\"}{\"field\": \"value3\"}"),
+ () -> {}
+ );
+ int splitPoint = retainedBytes.indexOf((byte) '}', 0) + 1;
+
+ IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
+ IndexRequest indexRequest1 = new IndexRequest();
+ indexRequest1.index(index);
+ indexRequest1.sourceContext().source(releasedBytes, XContentType.JSON);
+ indexRequest1.setPipeline(pipelineId);
+
+ IndexRequest indexRequest2 = new IndexRequest();
+ indexRequest2.index(index);
+ indexRequest2.sourceContext().source(retainedBytes.retainedSlice(0, splitPoint), XContentType.JSON);
+ indexRequest2.setPipeline(pipelineId);
+
+ IndexRequest indexRequestNoIngest = new IndexRequest();
+ indexRequestNoIngest.index(index);
+ indexRequestNoIngest.sourceContext()
+ .source(retainedBytes.retainedSlice(splitPoint, retainedBytes.length() - splitPoint), XContentType.JSON);
+
+ retainedBytes.decRef();
+ assertTrue(retainedBytes.hasReferences());
+
+ CountDownLatch blockLatch = new CountDownLatch(1);
+ blockWritePool(internalCluster().getInstance(ThreadPool.class), blockLatch);
+
+ PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
+ try {
+ handler.lastItems(List.of(indexRequest2, indexRequest1, indexRequestNoIngest), future);
+
+ assertBusy(() -> assertFalse(releasedBytes.hasReferences()));
+
+ assertTrue(retainedBytes.hasReferences());
+ } finally {
+ blockLatch.countDown();
+ }
+
+ BulkResponse bulkResponse = safeGet(future);
+ assertNoFailures(bulkResponse);
+ }
+
+ private static void putPipeline(String pipelineId) throws IOException {
+ BytesReference pipelineSource = BytesReference.bytes(
+ jsonBuilder().startObject()
+ .field("description", "my_pipeline")
+ .startArray("processors")
+ .startObject()
+ .startObject("test")
+ .endObject()
+ .endObject()
+ .endArray()
+ .endObject()
+ );
+ putJsonPipeline(pipelineId, pipelineSource);
+ }
+
+ private static void blockWritePool(ThreadPool threadPool, CountDownLatch finishLatch) {
+ final var threadCount = threadPool.info(ThreadPool.Names.WRITE).getMax();
+ final var startBarrier = new CyclicBarrier(threadCount + 1);
+ final var blockingTask = new AbstractRunnable() {
+ @Override
+ public void onFailure(Exception e) {
+ fail(e);
+ }
+
+ @Override
+ protected void doRun() {
+ safeAwait(startBarrier);
+ safeAwait(finishLatch);
+ }
+
+ @Override
+ public boolean isForceExecution() {
+ return true;
+ }
+ };
+ for (int i = 0; i < threadCount; i++) {
+ threadPool.executor(ThreadPool.Names.WRITE_COORDINATION).execute(blockingTask);
+ }
+ safeAwait(startBarrier);
+ }
+
+ private static void fillWriteQueue(ThreadPool threadPool) {
+ final var queueSize = Math.toIntExact(threadPool.info(ThreadPool.Names.WRITE).getQueueSize().singles());
+ final var queueFilled = new AtomicBoolean(false);
+ final var queueFillingTask = new AbstractRunnable() {
+ @Override
+ public void onFailure(Exception e) {
+ fail(e);
+ }
+
+ @Override
+ protected void doRun() {
+ assertTrue("thread pool not blocked", queueFilled.get());
+ }
+
+ @Override
+ public boolean isForceExecution() {
+ return true;
+ }
+ };
+ for (int i = 0; i < queueSize; i++) {
+ threadPool.executor(ThreadPool.Names.WRITE).execute(queueFillingTask);
+ }
+ queueFilled.set(true);
+ }
+}
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/rest/action/document/RestBulkActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/rest/action/document/RestBulkActionIT.java
index d0b5ec4562903..8e35116102c14 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/rest/action/document/RestBulkActionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/rest/action/document/RestBulkActionIT.java
@@ -25,6 +25,7 @@
import static org.hamcrest.Matchers.not;
public class RestBulkActionIT extends ESIntegTestCase {
+
@Override
protected boolean addMockHttpTransport() {
return false;
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
index 8511223017c92..01968596db932 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
@@ -410,7 +410,7 @@ static boolean executeBulkItemRequest(
XContentMeteringParserDecorator meteringParserDecorator = documentParsingProvider.newMeteringParserDecorator(request);
final SourceToParse sourceToParse = new SourceToParse(
request.id(),
- request.sourceContext().bytes(),
+ request.source(),
request.getContentType(),
request.routing(),
request.getDynamicTemplates(),
@@ -609,7 +609,7 @@ private static BulkItemResponse processUpdateResponse(
);
if (updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) {
- final BytesReference indexSourceAsBytes = updateIndexRequest.sourceContext().bytes();
+ final BytesReference indexSourceAsBytes = updateIndexRequest.source();
final Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(
indexSourceAsBytes,
true,
@@ -741,7 +741,7 @@ private static Engine.Result performOpOnReplica(
final IndexRequest indexRequest = (IndexRequest) docWriteRequest;
final SourceToParse sourceToParse = new SourceToParse(
indexRequest.id(),
- indexRequest.sourceContext().bytes(),
+ indexRequest.source(),
indexRequest.getContentType(),
indexRequest.routing()
);
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
index 2f622f12d7ccb..b52f5447b9311 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
@@ -159,7 +159,7 @@ protected void doInternalExecute(
request.id(),
request.index(),
request.version(),
- request.sourceContext().bytes(),
+ request.source(),
request.getContentType(),
request.getExecutedPipelines(),
validationResult.ignoredFields,
@@ -201,7 +201,7 @@ private ValidationResult validateMappings(
) {
final SourceToParse sourceToParse = new SourceToParse(
request.id(),
- request.sourceContext().bytes(),
+ request.source(),
request.getContentType(),
request.routing(),
request.getDynamicTemplates(),
diff --git a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
index 11de6b9d547e9..617a3c5c49b3e 100644
--- a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
@@ -226,7 +226,7 @@ protected void shardOperation(final UpdateRequest request, final ActionListener<
case CREATED -> {
IndexRequest upsertRequest = result.action();
// we fetch it from the index request so we don't generate the bytes twice; it's already done in the index request
- final BytesReference upsertSourceBytes = upsertRequest.sourceContext().bytes();
+ final BytesReference upsertSourceBytes = upsertRequest.source();
client.bulk(
toSingleItemBulkRequest(upsertRequest),
unwrappingSingleItemBulkResponse(ActionListener.wrap(response -> {
@@ -269,7 +269,7 @@ protected void shardOperation(final UpdateRequest request, final ActionListener<
case UPDATED -> {
IndexRequest indexRequest = result.action();
// we fetch it from the index request so we don't generate the bytes twice; it's already done in the index request
- final BytesReference indexSourceBytes = indexRequest.sourceContext().bytes();
+ final BytesReference indexSourceBytes = indexRequest.source();
client.bulk(
toSingleItemBulkRequest(indexRequest),
unwrappingSingleItemBulkResponse(ActionListener.wrap(response -> {
diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
index 2489d069ea737..657ad029626af 100644
--- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
@@ -759,7 +759,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
XContentParser parser = XContentHelper.createParser(
NamedXContentRegistry.EMPTY,
LoggingDeprecationHandler.INSTANCE,
- doc.sourceContext().bytes(),
+ doc.source(),
xContentType
)
) {
@@ -782,7 +782,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
XContentParser parser = XContentHelper.createParser(
NamedXContentRegistry.EMPTY,
LoggingDeprecationHandler.INSTANCE,
- upsertRequest.sourceContext().bytes(),
+ upsertRequest.source(),
xContentType
)
) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
index dd6c08c81a4d8..67a2b23df50da 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
@@ -1738,7 +1738,7 @@ public Index getWriteIndex(IndexRequest request, ProjectMetadata project) {
if (rawTimestamp != null) {
timestamp = getTimeStampFromRaw(rawTimestamp);
} else {
- timestamp = getTimestampFromParser(request.sourceContext().bytes(), request.getContentType());
+ timestamp = getTimestampFromParser(request.source(), request.getContentType());
}
timestamp = getCanonicalTimestampBound(timestamp);
Index result = selectTimeSeriesWriteIndex(timestamp, project);
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java
index bad567dab3537..b5f2697dd537b 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java
@@ -87,7 +87,7 @@ public void testIncrementalParsing() throws IOException {
deleteRequests::add
);
- ReleasableBytesReference request = ReleasableBytesReference.wrap(new BytesArray("""
+ ReleasableBytesReference request = new ReleasableBytesReference(new BytesArray("""
{ "index":{ "_id": "bar", "pipeline": "foo" } }
{ "field": "value"}
{ "index":{ "require_alias": false } }
@@ -98,7 +98,7 @@ public void testIncrementalParsing() throws IOException {
{ "index": { } }
{ "field": "value"}
{ "delete":{ "_id": "bop" } }
- """));
+ """), () -> {});
int consumed = 0;
for (int i = 0; i < request.length() - 1; ++i) {
@@ -107,9 +107,21 @@ public void testIncrementalParsing() throws IOException {
consumed += incrementalParser.parse(request.slice(consumed, request.length() - consumed), true);
assertThat(consumed, equalTo(request.length()));
+ request.decRef();
+
+ // The 3 index requests still retain slices of the request buffer
+ assertTrue(request.hasReferences());
+
assertThat(indexRequests.size(), equalTo(3));
assertThat(updateRequests.size(), equalTo(1));
assertThat(deleteRequests.size(), equalTo(2));
+
+ for (DocWriteRequest<?> req : indexRequests) {
+ req.close();
+ }
+
+ // Deletes and updates do not retain (upsert source is copied out as opposed to sliced)
+ assertFalse(request.hasReferences());
}
public void testIndexRequest() throws IOException {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java
index 504b717c61cd6..4b65bc3350080 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java
@@ -100,7 +100,7 @@ public MonitoringBulkRequest add(
if (MonitoringIndex.from(indexRequest.index()) != MonitoringIndex.TIMESTAMPED) {
return;
}
- final BytesReference source = indexRequest.sourceContext().bytes();
+ final BytesReference source = indexRequest.source();
if (source.length() == 0) {
throw new IllegalArgumentException(
"source is missing for monitoring document [" + indexRequest.index() + "][" + type + "][" + indexRequest.id() + "]"
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
index f0be3bdabd49b..16f0fbe27b1fa 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
@@ -18,6 +18,7 @@
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.bulk.TransportShardBulkAction;
import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.SourceContext;
import org.elasticsearch.action.support.ActionFilterChain;
import org.elasticsearch.action.support.MappedActionFilter;
import org.elasticsearch.action.support.RefCountingRunnable;
@@ -725,25 +726,21 @@ private void applyInferenceResponses(BulkItemRequest item, FieldInferenceRespons
inferenceFieldsMap.put(fieldName, result);
}
- try (ReleasableBytesReference originalSource = indexRequest.sourceContext().retainedBytes()) {
+ SourceContext sourceContext = indexRequest.sourceContext();
+ try (ReleasableBytesReference originalSource = sourceContext.retainedBytes()) {
if (useLegacyFormat) {
- var newDocMap = indexRequest.sourceAsMap();
+ var newDocMap = sourceContext.sourceAsMap();
for (var entry : inferenceFieldsMap.entrySet()) {
XContentMapValues.insertValue(entry.getKey(), newDocMap, entry.getValue());
}
- indexRequest.source(newDocMap, indexRequest.getContentType());
+ sourceContext.source(newDocMap, sourceContext.contentType());
} else {
- try (XContentBuilder builder = XContentBuilder.builder(indexRequest.getContentType().xContent())) {
- appendSourceAndInferenceMetadata(
- builder,
- indexRequest.sourceContext().bytes(),
- indexRequest.getContentType(),
- inferenceFieldsMap
- );
- indexRequest.source(builder);
+ try (XContentBuilder builder = XContentBuilder.builder(sourceContext.contentType().xContent())) {
+ appendSourceAndInferenceMetadata(builder, sourceContext.bytes(), sourceContext.contentType(), inferenceFieldsMap);
+ sourceContext.source(builder);
}
}
- long modifiedSourceSize = indexRequest.sourceContext().bytes().length();
+ long modifiedSourceSize = sourceContext.bytes().length();
// Add the indexing pressure from the source modifications.
// Don't increment operation count because we count one source update as one operation, and we already accounted for those
@@ -751,7 +748,7 @@ private void applyInferenceResponses(BulkItemRequest item, FieldInferenceRespons
try {
coordinatingIndexingPressure.increment(0, modifiedSourceSize - originalSource.length());
} catch (EsRejectedExecutionException e) {
- indexRequest.sourceContext().source(originalSource.retain(), indexRequest.getContentType());
+ sourceContext.source(originalSource.retain(), sourceContext.contentType());
item.abort(
item.index(),
new InferenceException(
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java
index ef4282c744c94..b385298c09062 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java
@@ -110,7 +110,7 @@ public Action.Result execute(String actionId, WatchExecutionContext ctx, Payload
indexRequest.index(),
indexRequest.id(),
action.refreshPolicy,
- new XContentSource(indexRequest.sourceContext().bytes(), XContentType.JSON)
+ new XContentSource(indexRequest.source(), XContentType.JSON)
);
}
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java
index 78d56fc5c3e2f..98bf3e7ab40a4 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java
@@ -56,7 +56,7 @@ public void put(WatchRecord watchRecord) throws Exception {
watchRecord.toXContent(builder, WatcherParams.HIDE_SECRETS);
IndexRequest request = new IndexRequest(HistoryStoreField.DATA_STREAM).id(watchRecord.id().value()).source(builder);
request.opType(IndexRequest.OpType.CREATE);
- if (request.sourceContext().bytes().length() > maxHistoryRecordSize.getBytes()) {
+ if (request.source().length() > maxHistoryRecordSize.getBytes()) {
WatchRecord redactedWatchRecord = watchRecord.dropLargeFields();
try (XContentBuilder redactedBuilder = XContentFactory.jsonBuilder()) {
redactedWatchRecord.toXContent(redactedBuilder, WatcherParams.HIDE_SECRETS);
From ba71bab571aae88250fa6496ceac16952154bfa8 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Thu, 4 Sep 2025 19:53:04 -0600
Subject: [PATCH 13/20] Fix
---
.../action/bulk/BulkSourceReleaseIT.java | 35 +++----------------
1 file changed, 5 insertions(+), 30 deletions(-)
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java
index b47b4a1d15bac..1b26de9b7041a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java
@@ -27,12 +27,12 @@
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.LockSupport;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 1)
public class BulkSourceReleaseIT extends ESIntegTestCase {
@Override
@@ -58,7 +58,7 @@ public void testBulkSourceReleaseWhenIngestReplacesSource() throws Exception {
indexRequest.setPipeline(pipelineId);
CountDownLatch blockLatch = new CountDownLatch(1);
- blockWritePool(internalCluster().getInstance(ThreadPool.class), blockLatch);
+ blockWritePool(internalCluster().getDataNodeInstance(ThreadPool.class), blockLatch);
PlainActionFuture future = new PlainActionFuture<>();
@@ -103,14 +103,14 @@ public void testBytesReferencedByTwoSourcesNotReleasedIfOnlyOneIngestPipeline()
assertTrue(originalBytes.hasReferences());
CountDownLatch blockLatch = new CountDownLatch(1);
- blockWritePool(internalCluster().getInstance(ThreadPool.class), blockLatch);
+ blockWritePool(internalCluster().getDataNodeInstance(ThreadPool.class), blockLatch);
PlainActionFuture future = new PlainActionFuture<>();
try {
handler.lastItems(List.of(indexRequest, indexRequestNoIngest), future);
// Pause briefly to allow time for any erroneous release of the bytes after ingest processing
- LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(50));
+ LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(500));
assertTrue(originalBytes.hasReferences());
} finally {
@@ -159,7 +159,7 @@ public void testSomeReferencesCanBeReleasedWhileOthersRetained() throws Exceptio
assertTrue(retainedBytes.hasReferences());
CountDownLatch blockLatch = new CountDownLatch(1);
- blockWritePool(internalCluster().getInstance(ThreadPool.class), blockLatch);
+ blockWritePool(internalCluster().getDataNodeInstance(ThreadPool.class), blockLatch);
PlainActionFuture future = new PlainActionFuture<>();
try {
@@ -218,29 +218,4 @@ public boolean isForceExecution() {
}
safeAwait(startBarrier);
}
-
- private static void fillWriteQueue(ThreadPool threadPool) {
- final var queueSize = Math.toIntExact(threadPool.info(ThreadPool.Names.WRITE).getQueueSize().singles());
- final var queueFilled = new AtomicBoolean(false);
- final var queueFillingTask = new AbstractRunnable() {
- @Override
- public void onFailure(Exception e) {
- fail(e);
- }
-
- @Override
- protected void doRun() {
- assertTrue("thread pool not blocked", queueFilled.get());
- }
-
- @Override
- public boolean isForceExecution() {
- return true;
- }
- };
- for (int i = 0; i < queueSize; i++) {
- threadPool.executor(ThreadPool.Names.WRITE).execute(queueFillingTask);
- }
- queueFilled.set(true);
- }
}
From b9523f5a02ef1088050c8932ed3eb5e516f45170 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Thu, 4 Sep 2025 21:48:55 -0600
Subject: [PATCH 14/20] Fix
---
.../org/elasticsearch/action/bulk/BulkSourceReleaseIT.java | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java
index 1b26de9b7041a..a224d09ceae2c 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java
@@ -110,7 +110,7 @@ public void testBytesReferencedByTwoSourcesNotReleasedIfOnlyOneIngestPipeline()
handler.lastItems(List.of(indexRequest, indexRequestNoIngest), future);
// Pause briefly to allow time for any erroneous release of the bytes after ingest processing
- LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(500));
+ LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(50));
assertTrue(originalBytes.hasReferences());
} finally {
@@ -214,7 +214,7 @@ public boolean isForceExecution() {
}
};
for (int i = 0; i < threadCount; i++) {
- threadPool.executor(ThreadPool.Names.WRITE_COORDINATION).execute(blockingTask);
+ threadPool.executor(ThreadPool.Names.WRITE).execute(blockingTask);
}
safeAwait(startBarrier);
}
From 247debd830cbe9887b7e8af0b8038ad2d7f3799c Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Fri, 5 Sep 2025 10:40:23 -0600
Subject: [PATCH 15/20] Fix
---
.../org/elasticsearch/system/indices/SystemIndicesQA.java | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java b/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java
index 61907151da2c3..ae3943b16cd12 100644
--- a/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java
+++ b/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java
@@ -183,10 +183,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli
indexRequest.sourceContext().source(content.retain(), request.getXContentType());
indexRequest.id(request.param("id"));
indexRequest.setRefreshPolicy(request.param("refresh"));
- return channel -> client.index(
- indexRequest,
- ActionListener.releaseAfter(new RestToXContentListener<>(channel), indexRequest.sourceContext())
- );
+ return channel -> client.index(indexRequest, ActionListener.releaseAfter(new RestToXContentListener<>(channel), indexRequest));
}
@Override
From 3a23c608c236f562cb0599efc30ab8def9111e09 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Fri, 5 Sep 2025 11:35:40 -0600
Subject: [PATCH 16/20] Fix
---
.../action/bulk/BulkSourceReleaseIT.java | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java
index a224d09ceae2c..69ff55a05d570 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkSourceReleaseIT.java
@@ -47,7 +47,8 @@ public void testBulkSourceReleaseWhenIngestReplacesSource() throws Exception {
String pipelineId = "pipeline_id";
putPipeline(pipelineId);
- IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class);
+ // Get the data node to ensure the ingest pipeline can be performed on this node
+ IncrementalBulkService incrementalBulkService = internalCluster().getDataNodeInstance(IncrementalBulkService.class);
ReleasableBytesReference originalBytes = new ReleasableBytesReference(new BytesArray("{\"field\": \"value\"}"), () -> {});
@@ -80,7 +81,8 @@ public void testBytesReferencedByTwoSourcesNotReleasedIfOnlyOneIngestPipeline()
String pipelineId = "pipeline_id";
putPipeline(pipelineId);
- IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class);
+ // Use the data node so that the ingest pipeline can run on this node
+ IncrementalBulkService incrementalBulkService = internalCluster().getDataNodeInstance(IncrementalBulkService.class);
ReleasableBytesReference originalBytes = new ReleasableBytesReference(
new BytesArray("{\"field\": \"value1\"}{\"field\": \"value2\"}"),
@@ -130,7 +132,8 @@ public void testSomeReferencesCanBeReleasedWhileOthersRetained() throws Exceptio
String pipelineId = "pipeline_id";
putPipeline(pipelineId);
- IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class);
+ // Use the data node so that the ingest pipeline can run on this node
+ IncrementalBulkService incrementalBulkService = internalCluster().getDataNodeInstance(IncrementalBulkService.class);
ReleasableBytesReference releasedBytes = new ReleasableBytesReference(new BytesArray("{\"field\": \"value1\"}"), () -> {});
ReleasableBytesReference retainedBytes = new ReleasableBytesReference(
@@ -147,7 +150,7 @@ public void testSomeReferencesCanBeReleasedWhileOthersRetained() throws Exceptio
IndexRequest indexRequest2 = new IndexRequest();
indexRequest2.index(index);
- indexRequest2.sourceContext().source(retainedBytes.retainedSlice(0, splitPoint), XContentType.JSON);
+ indexRequest2.sourceContext().source(retainedBytes.slice(0, splitPoint), XContentType.JSON);
indexRequest2.setPipeline(pipelineId);
IndexRequest indexRequestNoIngest = new IndexRequest();
@@ -155,7 +158,6 @@ public void testSomeReferencesCanBeReleasedWhileOthersRetained() throws Exceptio
indexRequestNoIngest.sourceContext()
.source(retainedBytes.retainedSlice(splitPoint, retainedBytes.length() - splitPoint), XContentType.JSON);
- retainedBytes.decRef();
assertTrue(retainedBytes.hasReferences());
CountDownLatch blockLatch = new CountDownLatch(1);
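The slice/retainedSlice swap above, together with dropping the manual decRef, keeps the reference count balanced: slice shares the underlying bytes without taking a new reference, while retainedSlice increments the count and must be paired with a release. A sketch of the contract as the test relies on it (splitPoint is illustrative; the test derives it from the first document):

    ReleasableBytesReference retainedBytes = new ReleasableBytesReference(
        new BytesArray("{\"field\": \"value1\"}{\"field\": \"value2\"}"),
        () -> {}
    ); // refCount == 1
    int splitPoint = 20; // illustrative split point
    BytesReference view = retainedBytes.slice(0, splitPoint); // shares bytes, takes no reference
    ReleasableBytesReference owned =
        retainedBytes.retainedSlice(splitPoint, retainedBytes.length() - splitPoint); // refCount == 2
    owned.close();                        // refCount back to 1
    assert retainedBytes.hasReferences(); // the original owner holds the last reference

The old combination of an extra retainedSlice plus a manual decRef made the ownership hard to follow; after the fix each owner holds exactly one counted reference.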
From 1f8c2eb00baa6d5c7a97b2cc0dc09f44fe06d80a Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Thu, 11 Sep 2025 12:58:09 -0600
Subject: [PATCH 17/20] Rename
---
...lureStoreMetricsWithIncrementalBulkIT.java | 4 +-
.../action/bulk/IncrementalBulkIT.java | 20 +++---
.../metrics/NodeIndexingMetricsIT.java | 8 +--
.../action/index/IndexRequest.java | 64 +++++++++----------
.../action/index/IndexRequestBuilder.java | 2 +-
.../{SourceContext.java => IndexSource.java} | 8 +--
.../action/update/UpdateRequestBuilder.java | 6 +-
.../ShardBulkInferenceActionFilter.java | 20 +++---
.../ShardBulkInferenceActionFilterTests.java | 24 +++----
.../LimitAwareBulkIndexerTests.java | 4 +-
10 files changed, 80 insertions(+), 80 deletions(-)
rename server/src/main/java/org/elasticsearch/action/index/{SourceContext.java => IndexSource.java} (97%)
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java
index 4246ff1552349..e9c1f68a986b2 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java
@@ -18,7 +18,7 @@
import org.elasticsearch.action.datastreams.CreateDataStreamAction;
import org.elasticsearch.action.datastreams.GetDataStreamAction;
import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.index.SourceContext;
+import org.elasticsearch.action.index.IndexSource;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
import org.elasticsearch.cluster.metadata.DataStream;
@@ -89,7 +89,7 @@ public void testShortCircuitFailure() throws Exception {
String coordinatingOnlyNode = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
- final ArrayList<SourceContext> contextsToRelease = new ArrayList<>();
+ final ArrayList<IndexSource> contextsToRelease = new ArrayList<>();
IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class, coordinatingOnlyNode);
try (IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest()) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/IncrementalBulkIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/IncrementalBulkIT.java
index a4ed25798e05e..49ea94a287bce 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/IncrementalBulkIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/IncrementalBulkIT.java
@@ -11,7 +11,7 @@
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.index.SourceContext;
+import org.elasticsearch.action.index.IndexSource;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.bytes.BytesReference;
@@ -127,7 +127,7 @@ public void testIndexingPressureRejection() {
try (Releasable r = indexingPressure.markCoordinatingOperationStarted(1, indexingPressure.stats().getMemoryLimit(), true)) {
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
- ArrayList<SourceContext> contextsToClose = new ArrayList<>();
+ ArrayList<IndexSource> contextsToClose = new ArrayList<>();
if (randomBoolean()) {
AtomicBoolean nextPage = new AtomicBoolean(false);
@@ -154,7 +154,7 @@ public void testIncrementalBulkLowWatermarkBackOff() throws Exception {
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
- ArrayList<SourceContext> contextsToClose = new ArrayList<>();
+ ArrayList<IndexSource> contextsToClose = new ArrayList<>();
AtomicBoolean nextPage = new AtomicBoolean(false);
IndexRequest indexRequest = indexRequest(index);
@@ -196,7 +196,7 @@ public void testIncrementalBulkHighWatermarkBackOff() throws Exception {
long lowWaterMarkSplits = indexingPressure.stats().getLowWaterMarkSplits();
long highWaterMarkSplits = indexingPressure.stats().getHighWaterMarkSplits();
- ArrayList<SourceContext> contextsToClose = new ArrayList<>();
+ ArrayList<IndexSource> contextsToClose = new ArrayList<>();
AtomicBoolean nextPage = new AtomicBoolean(false);
ArrayList<IncrementalBulkService.Handler> handlers = new ArrayList<>();
@@ -317,7 +317,7 @@ public void testBulkLevelBulkFailureAfterFirstIncrementalRequest() throws Except
IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class, randomNodeName);
ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, randomNodeName);
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
- ArrayList<SourceContext> contextsToClose = new ArrayList<>();
+ ArrayList<IndexSource> contextsToClose = new ArrayList<>();
PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
CountDownLatch blockingLatch1 = new CountDownLatch(1);
@@ -370,7 +370,7 @@ public void testShortCircuitShardLevelFailure() throws Exception {
String coordinatingOnlyNode = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
- ArrayList<SourceContext> contextsToClose = new ArrayList<>();
+ ArrayList<IndexSource> contextsToClose = new ArrayList<>();
IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class, coordinatingOnlyNode);
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
@@ -468,7 +468,7 @@ public void testShortCircuitShardLevelFailureWithIngestNodeHop() throws Exceptio
// a node with the ingest role.
String coordinatingOnlyNode = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
- ArrayList<SourceContext> contextsToClose = new ArrayList<>();
+ ArrayList<IndexSource> contextsToClose = new ArrayList<>();
IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class, coordinatingOnlyNode);
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
@@ -572,7 +572,7 @@ public boolean isForceExecution() {
}
private BulkResponse executeBulk(long docs, String index, IncrementalBulkService.Handler handler, ExecutorService executorService) {
- List<SourceContext> contextsToClose = new ArrayList<>();
+ List<IndexSource> contextsToClose = new ArrayList<>();
ConcurrentLinkedQueue<DocWriteRequest<?>> queue = new ConcurrentLinkedQueue<>();
for (int i = 0; i < docs; i++) {
IndexRequest indexRequest = indexRequest(index, contextsToClose);
@@ -605,7 +605,7 @@ public void run() {
return bulkResponse;
}
- private static void add512BRequests(ArrayList<DocWriteRequest<?>> requests, ArrayList<SourceContext> contextsToClose, String index) {
+ private static void add512BRequests(ArrayList<DocWriteRequest<?>> requests, ArrayList<IndexSource> contextsToClose, String index) {
long total = 0;
while (total < 512) {
IndexRequest indexRequest = indexRequest(index, contextsToClose);
@@ -615,7 +615,7 @@ private static void add512BRequests(ArrayList<DocWriteRequest<?>> requests, Arra
assertThat(total, lessThan(1024L));
}
- private static IndexRequest indexRequest(String index, List<SourceContext> contextsToClose) {
+ private static IndexRequest indexRequest(String index, List<IndexSource> contextsToClose) {
IndexRequest indexRequest = indexRequest(index);
contextsToClose.add(indexRequest.sourceContext());
return indexRequest;
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java
index fde1124250c94..93ee28904c4fe 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java
@@ -18,7 +18,7 @@
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.index.SourceContext;
+import org.elasticsearch.action.index.IndexSource;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.cluster.metadata.IndexMetadata;
@@ -704,7 +704,7 @@ public void testIncrementalBulkLowWatermarkSplitMetrics() throws Exception {
IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest();
- ArrayList<SourceContext> contextsToClose = new ArrayList<>();
+ ArrayList<IndexSource> contextsToClose = new ArrayList<>();
AtomicBoolean nextPage = new AtomicBoolean(false);
IndexRequest indexRequest = indexRequest(index);
@@ -795,7 +795,7 @@ public void testIncrementalBulkHighWatermarkSplitMetrics() throws Exception {
.orElseThrow();
testTelemetryPlugin.resetMeter();
- ArrayList<SourceContext> contextsToClose = new ArrayList<>();
+ ArrayList<IndexSource> contextsToClose = new ArrayList<>();
AtomicBoolean nextPage = new AtomicBoolean(false);
ArrayList<IncrementalBulkService.Handler> handlers = new ArrayList<>();
@@ -909,7 +909,7 @@ private static IndexRequest indexRequest(String index) {
return indexRequest;
}
- private static void add512BRequests(ArrayList<DocWriteRequest<?>> requests, ArrayList<SourceContext> contextsToClose, String index) {
+ private static void add512BRequests(ArrayList<DocWriteRequest<?>> requests, ArrayList<IndexSource> contextsToClose, String index) {
long total = 0;
while (total < 512) {
IndexRequest indexRequest = indexRequest(index);
diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
index 4d0532471dc89..dda9e47ca4897 100644
--- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
@@ -95,7 +95,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
@Nullable
private String routing;
- private final SourceContext sourceContext;
+ private final IndexSource indexSource;
private OpType opType = OpType.INDEX;
@@ -162,11 +162,11 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio
routing = in.readOptionalString();
boolean beforeSourceContext = in.getTransportVersion().before(TransportVersions.SOURCE_CONTEXT);
BytesReference source;
- SourceContext localSourceContext = null;
+ IndexSource localIndexSource = null;
if (beforeSourceContext) {
source = in.readBytesReference();
} else {
- localSourceContext = new SourceContext(in);
+ localIndexSource = new IndexSource(in);
source = null;
}
opType = OpType.fromId(in.readByte());
@@ -185,9 +185,9 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio
} else {
contentType = null;
}
- localSourceContext = new SourceContext(contentType, source);
+ localIndexSource = new IndexSource(contentType, source);
}
- sourceContext = Objects.requireNonNull(localSourceContext);
+ indexSource = Objects.requireNonNull(localIndexSource);
ifSeqNo = in.readZLong();
ifPrimaryTerm = in.readVLong();
requireAlias = in.readBoolean();
@@ -228,7 +228,7 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio
public IndexRequest() {
super(NO_SHARD_ID);
- this.sourceContext = new SourceContext();
+ this.indexSource = new IndexSource();
}
/**
@@ -238,7 +238,7 @@ public IndexRequest() {
public IndexRequest(String index) {
super(NO_SHARD_ID);
this.index = index;
- this.sourceContext = new SourceContext();
+ this.indexSource = new IndexSource();
}
private static final StringLiteralDeduplicator pipelineNameDeduplicator = new StringLiteralDeduplicator();
@@ -260,10 +260,10 @@ private static String readPipelineName(StreamInput in) throws IOException {
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
- if (sourceContext.hasSource() == false) {
+ if (indexSource.hasSource() == false) {
validationException = addValidationError("source is missing", validationException);
}
- if (sourceContext.contentType() == null) {
+ if (indexSource.contentType() == null) {
validationException = addValidationError("content type is missing", validationException);
}
assert opType == OpType.INDEX || opType == OpType.CREATE : "unexpected op-type: " + opType;
@@ -321,7 +321,7 @@ public ActionRequestValidationException validate() {
* source at index time
*/
public XContentType getContentType() {
- return sourceContext.contentType();
+ return indexSource.contentType();
}
/**
@@ -418,19 +418,19 @@ public boolean isPipelineResolved() {
return this.isPipelineResolved;
}
- public SourceContext sourceContext() {
- return sourceContext;
+ public IndexSource sourceContext() {
+ return indexSource;
}
/**
* The source of the document to index, recopied to a new array if it is unsafe.
*/
public BytesReference source() {
- return sourceContext.bytes();
+ return indexSource.bytes();
}
public Map<String, Object> sourceAsMap() {
- return sourceContext.sourceAsMap();
+ return indexSource.sourceAsMap();
}
/**
@@ -439,7 +439,7 @@ public Map<String, Object> sourceAsMap() {
* @param source The map to index
*/
public IndexRequest source(Map<String, Object> source) throws ElasticsearchGenerationException {
- sourceContext.source(source);
+ indexSource.source(source);
return this;
}
@@ -449,13 +449,13 @@ public IndexRequest source(Map<String, Object> source, XContentType contentType) throws ElasticsearchGeneration
* @param source The map to index
*/
public IndexRequest source(Map<String, Object> source, XContentType contentType) throws ElasticsearchGenerationException {
- sourceContext.source(source, contentType);
+ indexSource.source(source, contentType);
return this;
}
public IndexRequest source(Map<String, Object> source, XContentType contentType, boolean ensureNoSelfReferences)
throws ElasticsearchGenerationException {
- sourceContext.source(source, contentType, ensureNoSelfReferences);
+ indexSource.source(source, contentType, ensureNoSelfReferences);
return this;
}
@@ -466,7 +466,7 @@ public IndexRequest source(Map<String, Object> source, XContentType contentType, bool
* or using the {@link #source(byte[], XContentType)}.
*/
public IndexRequest source(String source, XContentType xContentType) {
- sourceContext.source(source, xContentType);
+ indexSource.source(source, xContentType);
return this;
}
@@ -474,7 +474,7 @@ public IndexRequest source(String source, XContentType xContentType) {
* Sets the content source to index.
*/
public IndexRequest source(XContentBuilder sourceBuilder) {
- sourceContext.source(sourceBuilder);
+ indexSource.source(sourceBuilder);
return this;
}
@@ -487,7 +487,7 @@ public IndexRequest source(XContentBuilder sourceBuilder) {
*
*/
public IndexRequest source(Object... source) {
- sourceContext.source(source);
+ indexSource.source(source);
return this;
}
@@ -500,7 +500,7 @@ public IndexRequest source(Object... source) {
*
*/
public IndexRequest source(XContentType xContentType, Object... source) {
- sourceContext.source(xContentType, source);
+ indexSource.source(xContentType, source);
return this;
}
@@ -508,7 +508,7 @@ public IndexRequest source(XContentType xContentType, Object... source) {
* Sets the document to index in bytes form.
*/
public IndexRequest source(BytesReference source, XContentType xContentType) {
- sourceContext.source(Objects.requireNonNull(source), Objects.requireNonNull(xContentType));
+ indexSource.source(Objects.requireNonNull(source), Objects.requireNonNull(xContentType));
return this;
}
@@ -516,7 +516,7 @@ public IndexRequest source(BytesReference source, XContentType xContentType) {
* Sets the document to index in bytes form.
*/
public IndexRequest source(byte[] source, XContentType xContentType) {
- sourceContext.source(source, xContentType);
+ indexSource.source(source, xContentType);
return this;
}
@@ -529,7 +529,7 @@ public IndexRequest source(byte[] source, XContentType xContentType) {
* @param length The length of the data
*/
public IndexRequest source(byte[] source, int offset, int length, XContentType xContentType) {
- sourceContext.source(source, offset, length, xContentType);
+ indexSource.source(source, offset, length, xContentType);
return this;
}
@@ -742,9 +742,9 @@ private void writeBody(StreamOutput out) throws IOException {
out.writeOptionalString(id);
out.writeOptionalString(routing);
if (out.getTransportVersion().onOrAfter(TransportVersions.SOURCE_CONTEXT)) {
- sourceContext.writeTo(out);
+ indexSource.writeTo(out);
} else {
- out.writeBytesReference(sourceContext.bytes());
+ out.writeBytesReference(indexSource.bytes());
}
out.writeByte(opType.getId());
out.writeLong(version);
@@ -755,7 +755,7 @@ private void writeBody(StreamOutput out) throws IOException {
out.writeBoolean(isRetry);
out.writeLong(autoGeneratedTimestamp);
if (out.getTransportVersion().before(TransportVersions.SOURCE_CONTEXT)) {
- XContentType contentType = sourceContext.contentType();
+ XContentType contentType = indexSource.contentType();
if (contentType != null) {
out.writeBoolean(true);
XContentHelper.writeTo(out, contentType);
@@ -800,13 +800,13 @@ private void writeBody(StreamOutput out) throws IOException {
public String toString() {
String sSource = "_na_";
try {
- if (sourceContext.byteLength() > MAX_SOURCE_LENGTH_IN_TOSTRING) {
+ if (indexSource.byteLength() > MAX_SOURCE_LENGTH_IN_TOSTRING) {
sSource = "n/a, actual length: ["
- + ByteSizeValue.ofBytes(sourceContext.byteLength()).toString()
+ + ByteSizeValue.ofBytes(indexSource.byteLength()).toString()
+ "], max length: "
+ ByteSizeValue.ofBytes(MAX_SOURCE_LENGTH_IN_TOSTRING).toString();
} else {
- sSource = XContentHelper.convertToJson(sourceContext.bytes(), false);
+ sSource = XContentHelper.convertToJson(indexSource.bytes(), false);
}
} catch (Exception e) {
// ignore
@@ -841,7 +841,7 @@ public long getAutoGeneratedTimestamp() {
@Override
public long ramBytesUsed() {
- return SHALLOW_SIZE + RamUsageEstimator.sizeOf(id) + sourceContext.byteLength();
+ return SHALLOW_SIZE + RamUsageEstimator.sizeOf(id) + indexSource.byteLength();
}
@Override
@@ -896,7 +896,7 @@ public Index getConcreteWriteIndex(IndexAbstraction ia, ProjectMetadata project)
@Override
public int route(IndexRouting indexRouting) {
- return indexRouting.indexShard(id, routing, sourceContext.contentType(), sourceContext.bytes());
+ return indexRouting.indexShard(id, routing, indexSource.contentType(), indexSource.bytes());
}
public IndexRequest setRequireAlias(boolean requireAlias) {
diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
index 947d6bf610f49..c28b48ce825c5 100644
--- a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
@@ -176,7 +176,7 @@ public IndexRequestBuilder setSource(Object... source) {
*
*/
public IndexRequestBuilder setSource(XContentType xContentType, Object... source) {
- return setSource(SourceContext.getXContentBuilder(xContentType, source));
+ return setSource(IndexSource.getXContentBuilder(xContentType, source));
}
/**
diff --git a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java b/server/src/main/java/org/elasticsearch/action/index/IndexSource.java
similarity index 97%
rename from server/src/main/java/org/elasticsearch/action/index/SourceContext.java
rename to server/src/main/java/org/elasticsearch/action/index/IndexSource.java
index e3dd1afff93bb..ad2f52c8742b8 100644
--- a/server/src/main/java/org/elasticsearch/action/index/SourceContext.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexSource.java
@@ -27,20 +27,20 @@
import java.io.IOException;
import java.util.Map;
-public class SourceContext implements Writeable, Releasable {
+public class IndexSource implements Writeable, Releasable {
private XContentType contentType;
private ReleasableBytesReference source;
private boolean isClosed = false;
- public SourceContext() {}
+ public IndexSource() {}
- public SourceContext(XContentType contentType, BytesReference source) {
+ public IndexSource(XContentType contentType, BytesReference source) {
this.contentType = contentType;
this.source = ReleasableBytesReference.wrap(source);
}
- public SourceContext(StreamInput in) throws IOException {
+ public IndexSource(StreamInput in) throws IOException {
if (in.readBoolean()) {
// faster than StreamInput::readEnum; do not replace, we read a lot of these instances at times
contentType = XContentType.ofOrdinal(in.readByte());
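For context on the comment above: readEnum goes through readVInt plus bounds checks, while ofOrdinal(readByte()) stays on a single cheap read. This works because a vint below 128 is one byte on the wire whose value equals the ordinal. A hedged sketch of a compatible write side (illustrative only; the production path goes through XContentHelper.writeTo, as in the IndexRequest hunk earlier):

    // Illustrative inverse of the read above: a presence boolean, then the
    // ordinal as a single byte (wire-compatible with a vint below 128).
    static void writeOptionalContentType(StreamOutput out, @Nullable XContentType contentType) throws IOException {
        if (contentType != null) {
            out.writeBoolean(true);
            out.writeByte((byte) contentType.ordinal());
        } else {
            out.writeBoolean(false);
        }
    }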
diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
index 0ff3572a7a574..e1e54cd31ab9b 100644
--- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
@@ -11,7 +11,7 @@
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.index.SourceContext;
+import org.elasticsearch.action.index.IndexSource;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.WriteRequestBuilder;
@@ -294,7 +294,7 @@ public UpdateRequestBuilder setDoc(Object... source) {
* is a field and value pairs.
*/
public UpdateRequestBuilder setDoc(XContentType xContentType, Object... source) {
- return setDoc(SourceContext.getXContentBuilder(xContentType, source));
+ return setDoc(IndexSource.getXContentBuilder(xContentType, source));
}
/**
@@ -373,7 +373,7 @@ public UpdateRequestBuilder setUpsert(Object... source) {
* includes field and value pairs.
*/
public UpdateRequestBuilder setUpsert(XContentType xContentType, Object... source) {
- return setUpsert(SourceContext.getXContentBuilder(xContentType, source));
+ return setUpsert(IndexSource.getXContentBuilder(xContentType, source));
}
/**
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
index 16f0fbe27b1fa..66f8dad1156e7 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
@@ -18,7 +18,7 @@
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.bulk.TransportShardBulkAction;
import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.index.SourceContext;
+import org.elasticsearch.action.index.IndexSource;
import org.elasticsearch.action.support.ActionFilterChain;
import org.elasticsearch.action.support.MappedActionFilter;
import org.elasticsearch.action.support.RefCountingRunnable;
@@ -726,21 +726,21 @@ private void applyInferenceResponses(BulkItemRequest item, FieldInferenceRespons
inferenceFieldsMap.put(fieldName, result);
}
- SourceContext sourceContext = indexRequest.sourceContext();
- try (ReleasableBytesReference originalSource = sourceContext.retainedBytes()) {
+ IndexSource indexSource = indexRequest.sourceContext();
+ try (ReleasableBytesReference originalSource = indexSource.retainedBytes()) {
if (useLegacyFormat) {
- var newDocMap = sourceContext.sourceAsMap();
+ var newDocMap = indexSource.sourceAsMap();
for (var entry : inferenceFieldsMap.entrySet()) {
XContentMapValues.insertValue(entry.getKey(), newDocMap, entry.getValue());
}
- sourceContext.source(newDocMap, sourceContext.contentType());
+ indexSource.source(newDocMap, indexSource.contentType());
} else {
- try (XContentBuilder builder = XContentBuilder.builder(sourceContext.contentType().xContent())) {
- appendSourceAndInferenceMetadata(builder, sourceContext.bytes(), sourceContext.contentType(), inferenceFieldsMap);
- sourceContext.source(builder);
+ try (XContentBuilder builder = XContentBuilder.builder(indexSource.contentType().xContent())) {
+ appendSourceAndInferenceMetadata(builder, indexSource.bytes(), indexSource.contentType(), inferenceFieldsMap);
+ indexSource.source(builder);
}
}
- long modifiedSourceSize = sourceContext.bytes().length();
+ long modifiedSourceSize = indexSource.bytes().length();
// Add the indexing pressure from the source modifications.
// Don't increment operation count because we count one source update as one operation, and we already accounted for those
@@ -748,7 +748,7 @@ private void applyInferenceResponses(BulkItemRequest item, FieldInferenceRespons
try {
coordinatingIndexingPressure.increment(0, modifiedSourceSize - originalSource.length());
} catch (EsRejectedExecutionException e) {
- sourceContext.source(originalSource.retain(), sourceContext.contentType());
+ indexSource.source(originalSource.retain(), indexSource.contentType());
item.abort(
item.index(),
new InferenceException(
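The rejection branch above restores the document before aborting: the original bytes are retained up front, the inference rewrite swaps in the enlarged source, only the size delta is charged to indexing pressure, and on rejection the retained original is handed back to the request. Condensed, with the rewrite helper and exception message as illustrative stand-ins for the code shown above:

    IndexSource indexSource = indexRequest.sourceContext();
    try (ReleasableBytesReference originalSource = indexSource.retainedBytes()) {
        rewriteWithInferenceFields(indexSource, inferenceFieldsMap); // hypothetical stand-in for the rewrite above
        long delta = indexSource.bytes().length() - originalSource.length();
        try {
            coordinatingIndexingPressure.increment(0, delta); // charge only the size change
        } catch (EsRejectedExecutionException e) {
            // Roll back: hand the retained original back to the request before aborting.
            indexSource.source(originalSource.retain(), indexSource.contentType());
            item.abort(item.index(), new InferenceException("...", e)); // message elided
        }
    } // the reference taken by retainedBytes() is released here either way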
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java
index 36dc512a5286c..a67dc48b47c88 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java
@@ -18,7 +18,7 @@
import org.elasticsearch.action.bulk.BulkShardResponse;
import org.elasticsearch.action.bulk.TransportShardBulkAction;
import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.index.SourceContext;
+import org.elasticsearch.action.index.IndexSource;
import org.elasticsearch.action.support.ActionFilterChain;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.update.UpdateRequest;
@@ -574,21 +574,21 @@ public void testIndexingPressure() throws Exception {
inferenceStats
);
- XContentBuilder doc0Source = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", "a test value");
- XContentBuilder doc1Source = SourceContext.getXContentBuilder(XContentType.JSON, "dense_field", "another test value");
- XContentBuilder doc2Source = SourceContext.getXContentBuilder(
+ XContentBuilder doc0Source = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", "a test value");
+ XContentBuilder doc1Source = IndexSource.getXContentBuilder(XContentType.JSON, "dense_field", "another test value");
+ XContentBuilder doc2Source = IndexSource.getXContentBuilder(
XContentType.JSON,
"sparse_field",
"a test value",
"dense_field",
"another test value"
);
- XContentBuilder doc3Source = SourceContext.getXContentBuilder(
+ XContentBuilder doc3Source = IndexSource.getXContentBuilder(
XContentType.JSON,
"dense_field",
List.of("value one", " ", "value two")
);
- XContentBuilder doc4Source = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", " ");
+ XContentBuilder doc4Source = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", " ");
XContentBuilder doc5Source = XContentFactory.contentBuilder(XContentType.JSON);
{
doc5Source.startObject();
@@ -602,8 +602,8 @@ public void testIndexingPressure() throws Exception {
);
doc5Source.endObject();
}
- XContentBuilder doc0UpdateSource = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", "an updated value");
- XContentBuilder doc1UpdateSource = SourceContext.getXContentBuilder(XContentType.JSON, "dense_field", null);
+ XContentBuilder doc0UpdateSource = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", "an updated value");
+ XContentBuilder doc1UpdateSource = IndexSource.getXContentBuilder(XContentType.JSON, "dense_field", null);
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain<BulkShardRequest, BulkShardResponse> actionFilterChain = (task, action, request, listener) -> {
@@ -693,7 +693,7 @@ public void testIndexingPressureTripsOnInferenceRequestGeneration() throws Excep
inferenceStats
);
- XContentBuilder doc1Source = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
+ XContentBuilder doc1Source = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain<BulkShardRequest, BulkShardResponse> actionFilterChain = (task, action, request, listener) -> {
@@ -762,7 +762,7 @@ public void testIndexingPressureTripsOnInferenceRequestGeneration() throws Excep
@SuppressWarnings("unchecked")
public void testIndexingPressureTripsOnInferenceResponseHandling() throws Exception {
- final XContentBuilder doc1Source = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
+ final XContentBuilder doc1Source = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
final InstrumentedIndexingPressure indexingPressure = new InstrumentedIndexingPressure(
Settings.builder().put(MAX_COORDINATING_BYTES.getKey(), (length(doc1Source) + 1) + "b").build()
);
@@ -849,8 +849,8 @@ public void testIndexingPressureTripsOnInferenceResponseHandling() throws Except
@SuppressWarnings("unchecked")
public void testIndexingPressurePartialFailure() throws Exception {
// Use different length strings so that doc 1 and doc 2 sources are different sizes
- final XContentBuilder doc1Source = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
- final XContentBuilder doc2Source = SourceContext.getXContentBuilder(XContentType.JSON, "sparse_field", "bazzz");
+ final XContentBuilder doc1Source = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
+ final XContentBuilder doc2Source = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", "bazzz");
final StaticModel sparseModel = StaticModel.createRandomInstance(TaskType.SPARSE_EMBEDDING);
final ChunkedInferenceEmbedding barEmbedding = randomChunkedInferenceEmbedding(sparseModel, List.of("bar"));
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/LimitAwareBulkIndexerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/LimitAwareBulkIndexerTests.java
index b0a14611de221..da260c07c883e 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/LimitAwareBulkIndexerTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/LimitAwareBulkIndexerTests.java
@@ -9,7 +9,7 @@
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.index.SourceContext;
+import org.elasticsearch.action.index.IndexSource;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
@@ -80,7 +80,7 @@ private LimitAwareBulkIndexer createIndexer(long bytesLimit) {
private static IndexRequest mockIndexRequest(long ramBytes) {
IndexRequest indexRequest = mock(IndexRequest.class);
when(indexRequest.ramBytesUsed()).thenReturn(ramBytes);
- when(indexRequest.sourceContext()).thenReturn(new SourceContext());
+ when(indexRequest.sourceContext()).thenReturn(new IndexSource());
return indexRequest;
}
}
From 20fbc4677b5062a7241ef21c6a7568112bcc519e Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Thu, 11 Sep 2025 16:23:46 -0600
Subject: [PATCH 18/20] Change
---
.../org/elasticsearch/rest/action/document/RestBulkAction.java | 2 +-
.../org/elasticsearch/rest/action/document/RestIndexAction.java | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
index 76d3c62e1c859..111d9b4d24148 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
@@ -132,7 +132,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
}
// The actual bulk request items are mutable during the bulk process so we must create a copy
- List<DocWriteRequest<?>> toClose = bulkRequest.requests();
+ List<DocWriteRequest<?>> toClose = new ArrayList<>(bulkRequest.requests());
return channel -> client.bulk(
bulkRequest,
ActionListener.releaseAfter(new RestRefCountedChunkedToXContentListener<>(channel), () -> Releasables.close(toClose))
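The copy matters because the bulk machinery mutates bulkRequest.requests() while the operation is in flight, and the release callback must close the items as they were submitted. A sketch of the hazard the snapshot avoids (assuming the request items are Releasable by this point in the series; listener stands in for the REST listener above):

    // Aliasing bug: both names point at the same mutable list, so items the
    // bulk machinery removes or nulls out are never closed.
    List<DocWriteRequest<?>> toClose = bulkRequest.requests();

    // Fixed: snapshot the items up front; later mutation of the live list
    // no longer affects what gets released.
    List<DocWriteRequest<?>> snapshot = new ArrayList<>(bulkRequest.requests());
    ActionListener<BulkResponse> wrapped = ActionListener.releaseAfter(
        listener,
        () -> Releasables.close(snapshot) // closes exactly the submitted items
    );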
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
index f1c29affc3229..1799635193c36 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
@@ -157,7 +157,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
indexRequest,
ActionListener.releaseAfter(
new RestToXContentListener<>(channel, DocWriteResponse::status, r -> r.getLocation(indexRequest.routing())),
- indexRequest.indexSource()
+ indexRequest
)
);
};
From 82f4885968a09d3b6810c5704ca49d8893352911 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine
Date: Wed, 17 Sep 2025 17:35:33 +0000
Subject: [PATCH 19/20] [CI] Update transport version definitions
---
server/src/main/resources/transport/upper_bounds/9.2.csv | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/server/src/main/resources/transport/upper_bounds/9.2.csv b/server/src/main/resources/transport/upper_bounds/9.2.csv
index 49360d5e62d69..e24f914a1d1ca 100644
--- a/server/src/main/resources/transport/upper_bounds/9.2.csv
+++ b/server/src/main/resources/transport/upper_bounds/9.2.csv
@@ -1 +1 @@
-inference_api_eis_diagnostics,9156000
+ml_inference_endpoint_cache,9157000
From fefec4ab4094d2bc5edafaa3a97511dc39921335 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine
Date: Thu, 2 Oct 2025 07:40:58 +0000
Subject: [PATCH 20/20] [CI] Update transport version definitions
---
server/src/main/resources/transport/upper_bounds/8.18.csv | 2 +-
server/src/main/resources/transport/upper_bounds/8.19.csv | 2 +-
server/src/main/resources/transport/upper_bounds/9.0.csv | 2 +-
server/src/main/resources/transport/upper_bounds/9.1.csv | 2 +-
server/src/main/resources/transport/upper_bounds/9.2.csv | 2 +-
server/src/main/resources/transport/upper_bounds/9.3.csv | 1 +
6 files changed, 6 insertions(+), 5 deletions(-)
create mode 100644 server/src/main/resources/transport/upper_bounds/9.3.csv
diff --git a/server/src/main/resources/transport/upper_bounds/8.18.csv b/server/src/main/resources/transport/upper_bounds/8.18.csv
index 4eb5140004ea6..266bfbbd3bf78 100644
--- a/server/src/main/resources/transport/upper_bounds/8.18.csv
+++ b/server/src/main/resources/transport/upper_bounds/8.18.csv
@@ -1 +1 @@
-initial_elasticsearch_8_18_6,8840008
+transform_check_for_dangling_tasks,8840011
diff --git a/server/src/main/resources/transport/upper_bounds/8.19.csv b/server/src/main/resources/transport/upper_bounds/8.19.csv
index 476468b203875..3600b3f8c633a 100644
--- a/server/src/main/resources/transport/upper_bounds/8.19.csv
+++ b/server/src/main/resources/transport/upper_bounds/8.19.csv
@@ -1 +1 @@
-initial_elasticsearch_8_19_3,8841067
+transform_check_for_dangling_tasks,8841070
diff --git a/server/src/main/resources/transport/upper_bounds/9.0.csv b/server/src/main/resources/transport/upper_bounds/9.0.csv
index f8f50cc6d7839..c11e6837bb813 100644
--- a/server/src/main/resources/transport/upper_bounds/9.0.csv
+++ b/server/src/main/resources/transport/upper_bounds/9.0.csv
@@ -1 +1 @@
-initial_elasticsearch_9_0_6,9000015
+transform_check_for_dangling_tasks,9000018
diff --git a/server/src/main/resources/transport/upper_bounds/9.1.csv b/server/src/main/resources/transport/upper_bounds/9.1.csv
index 5a65f2e578156..80b97d85f7511 100644
--- a/server/src/main/resources/transport/upper_bounds/9.1.csv
+++ b/server/src/main/resources/transport/upper_bounds/9.1.csv
@@ -1 +1 @@
-initial_elasticsearch_9_1_4,9112007
+transform_check_for_dangling_tasks,9112009
diff --git a/server/src/main/resources/transport/upper_bounds/9.2.csv b/server/src/main/resources/transport/upper_bounds/9.2.csv
index e24f914a1d1ca..2147eab66c207 100644
--- a/server/src/main/resources/transport/upper_bounds/9.2.csv
+++ b/server/src/main/resources/transport/upper_bounds/9.2.csv
@@ -1 +1 @@
-ml_inference_endpoint_cache,9157000
+initial_9.2.0,9185000
diff --git a/server/src/main/resources/transport/upper_bounds/9.3.csv b/server/src/main/resources/transport/upper_bounds/9.3.csv
new file mode 100644
index 0000000000000..2147eab66c207
--- /dev/null
+++ b/server/src/main/resources/transport/upper_bounds/9.3.csv
@@ -0,0 +1 @@
+initial_9.2.0,9185000