23 | 23 | import org.elasticsearch.action.support.IndicesOptions; |
24 | 24 | import org.elasticsearch.client.internal.Client; |
25 | 25 | import org.elasticsearch.cluster.ClusterState; |
| 26 | +import org.elasticsearch.common.bytes.ReleasableBytesReference; |
26 | 27 | import org.elasticsearch.common.io.stream.NamedWriteableRegistry; |
| 28 | +import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; |
27 | 29 | import org.elasticsearch.common.io.stream.StreamInput; |
28 | 30 | import org.elasticsearch.common.io.stream.StreamOutput; |
29 | 31 | import org.elasticsearch.common.io.stream.Writeable; |
50 | 52 | import org.elasticsearch.tasks.TaskId; |
51 | 53 | import org.elasticsearch.threadpool.ThreadPool; |
52 | 54 | import org.elasticsearch.transport.AbstractTransportRequest; |
| 55 | +import org.elasticsearch.transport.BytesTransportResponse; |
53 | 56 | import org.elasticsearch.transport.LeakTracker; |
54 | 57 | import org.elasticsearch.transport.SendRequestTransportException; |
55 | 58 | import org.elasticsearch.transport.Transport; |
58 | 61 | import org.elasticsearch.transport.TransportException; |
59 | 62 | import org.elasticsearch.transport.TransportResponse; |
60 | 63 | import org.elasticsearch.transport.TransportResponseHandler; |
| 64 | +import org.elasticsearch.transport.TransportService; |
61 | 65 |
62 | 66 | import java.io.IOException; |
63 | 67 | import java.util.ArrayList; |
@@ -215,22 +219,6 @@ public static final class NodeQueryResponse extends TransportResponse { |
215 | 219 | this.topDocsStats = SearchPhaseController.TopDocsStats.readFrom(in); |
216 | 220 | } |
217 | 221 |
218 | | - NodeQueryResponse( |
219 | | - QueryPhaseResultConsumer.MergeResult mergeResult, |
220 | | - Object[] results, |
221 | | - SearchPhaseController.TopDocsStats topDocsStats |
222 | | - ) { |
223 | | - this.results = results; |
224 | | - for (Object result : results) { |
225 | | - if (result instanceof QuerySearchResult r) { |
226 | | - r.incRef(); |
227 | | - } |
228 | | - } |
229 | | - this.mergeResult = mergeResult; |
230 | | - this.topDocsStats = topDocsStats; |
231 | | - assert Arrays.stream(results).noneMatch(Objects::isNull) : Arrays.toString(results); |
232 | | - } |
233 | | - |
234 | 222 | // public for tests |
235 | 223 | public Object[] getResults() { |
236 | 224 | return results; |
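With this constructor removed the data node no longer materializes a NodeQueryResponse at all; only the StreamInput-reading path above survives, for the coordinator side. A rough reader-side sketch of the framing the node now emits directly (the StreamInput methods are real, but the class and method names are made up for illustration and this is not the actual NodeQueryResponse(StreamInput) constructor):

```java
import java.io.IOException;

import org.elasticsearch.common.io.stream.StreamInput;

// Illustrative sketch of the wire layout produced in onShardDone() further down:
// a vint shard count, then per shard a boolean discriminator (failure vs. result),
// then the partial merge result followed by the TopDocsStats.
final class NodeQueryResponseWireSketch {
    static void readFraming(StreamInput in) throws IOException {
        final int resultCount = in.readVInt();
        for (int i = 0; i < resultCount; i++) {
            if (in.readBoolean() == false) {
                Exception shardFailure = in.readException(); // this shard failed
            } else {
                // a serialized QuerySearchResult for this shard would be read here
            }
        }
        // remaining bytes: the QueryPhaseResultConsumer.MergeResult, then
        // SearchPhaseController.TopDocsStats.readFrom(in)
    }
}
```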
@@ -552,7 +540,7 @@ static void registerNodeSearchAction( |
552 | 540 | ) { |
553 | 541 | var transportService = searchTransportService.transportService(); |
554 | 542 | var threadPool = transportService.getThreadPool(); |
555 | | - final Dependencies dependencies = new Dependencies(searchService, threadPool.executor(ThreadPool.Names.SEARCH)); |
| 543 | + final Dependencies dependencies = new Dependencies(searchService, transportService, threadPool.executor(ThreadPool.Names.SEARCH)); |
556 | 544 | // Even though not all searches run on the search pool, we use the search pool size as the upper limit of shards to execute in |
557 | 545 | // parallel to keep the implementation simple instead of working out the exact pool(s) a query will use up-front. |
558 | 546 | final int searchPoolMax = threadPool.info(ThreadPool.Names.SEARCH).getMax(); |
@@ -715,7 +703,7 @@ public void onFailure(Exception e) { |
715 | 703 | } |
716 | 704 | } |
717 | 705 |
718 | | - private record Dependencies(SearchService searchService, Executor executor) {} |
| 706 | + private record Dependencies(SearchService searchService, TransportService transportService, Executor executor) {} |
719 | 707 |
720 | 708 | private static final class QueryPerNodeState { |
721 | 709 |
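TransportService joins the Dependencies record purely so QueryPerNodeState can allocate the recycler-backed buffer it now serializes into (newNetworkBytesStream() in the hunk below); SearchService and the search executor are passed through unchanged. A minimal sketch of the same bundle-the-services-once pattern, with a made-up record name:

```java
import java.util.concurrent.Executor;

import org.elasticsearch.search.SearchService;
import org.elasticsearch.transport.TransportService;

// HandlerScope is a hypothetical stand-in for Dependencies: services are captured once at
// handler-registration time, so per-request state never needs to look them up again.
record HandlerScope(SearchService searchService, TransportService transportService, Executor executor) {}
```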
@@ -760,6 +748,8 @@ void onShardDone() { |
760 | 748 | if (countDown.countDown() == false) { |
761 | 749 | return; |
762 | 750 | } |
| 751 | + RecyclerBytesStreamOutput out = null; |
| 752 | + boolean success = false; |
763 | 753 | var channelListener = new ChannelActionListener<>(channel); |
764 | 754 | try (queryPhaseResultConsumer) { |
765 | 755 | var failure = queryPhaseResultConsumer.failure.get(); |
@@ -788,34 +778,46 @@ void onShardDone() { |
788 | 778 | relevantShardIndices.set(localIndex); |
789 | 779 | } |
790 | 780 | } |
791 | | - final Object[] results = new Object[queryPhaseResultConsumer.getNumShards()]; |
792 | | - for (int i = 0; i < results.length; i++) { |
793 | | - var result = queryPhaseResultConsumer.results.get(i); |
794 | | - if (result == null) { |
795 | | - results[i] = failures.get(i); |
796 | | - } else { |
797 | | - // free context id and remove it from the result right away in case we don't need it anymore |
798 | | - if (result instanceof QuerySearchResult q |
799 | | - && q.getContextId() != null |
800 | | - && relevantShardIndices.get(q.getShardIndex()) == false |
801 | | - && q.hasSuggestHits() == false |
802 | | - && q.getRankShardResult() == null |
803 | | - && searchRequest.searchRequest.scroll() == null |
804 | | - && isPartOfPIT(searchRequest.searchRequest, q.getContextId()) == false) { |
805 | | - if (dependencies.searchService.freeReaderContext(q.getContextId())) { |
806 | | - q.clearContextId(); |
| 781 | + final int resultCount = queryPhaseResultConsumer.getNumShards(); |
| 782 | + out = dependencies.transportService.newNetworkBytesStream(); |
| 783 | + out.setTransportVersion(channel.getVersion()); |
| 784 | + try { |
| 785 | + out.writeVInt(resultCount); |
| 786 | + for (int i = 0; i < resultCount; i++) { |
| 787 | + var result = queryPhaseResultConsumer.results.get(i); |
| 788 | + if (result == null) { |
| 789 | + out.writeBoolean(false); |
| 790 | + out.writeException(failures.remove(i)); |
| 791 | + } else { |
| 792 | + // free context id and remove it from the result right away in case we don't need it anymore |
| 793 | + if (result instanceof QuerySearchResult q |
| 794 | + && q.getContextId() != null |
| 795 | + && relevantShardIndices.get(q.getShardIndex()) == false |
| 796 | + && q.hasSuggestHits() == false |
| 797 | + && q.getRankShardResult() == null |
| 798 | + && searchRequest.searchRequest.scroll() == null |
| 799 | + && isPartOfPIT(searchRequest.searchRequest, q.getContextId()) == false) { |
| 800 | + if (dependencies.searchService.freeReaderContext(q.getContextId())) { |
| 801 | + q.clearContextId(); |
| 802 | + } |
807 | 803 | } |
| 804 | + out.writeBoolean(true); |
| 805 | + result.writeTo(out); |
808 | 806 | } |
809 | | - results[i] = result; |
810 | 807 | } |
811 | | - assert results[i] != null; |
| 808 | + mergeResult.writeTo(out); |
| 809 | + queryPhaseResultConsumer.topDocsStats.writeTo(out); |
| 810 | + success = true; |
| 811 | + } catch (IOException e) { |
| 812 | + handleMergeFailure(e, channelListener); |
| 813 | + return; |
| 814 | + } |
| 815 | + } finally { |
| 816 | + if (success == false && out != null) { |
| 817 | + out.close(); |
812 | 818 | } |
813 | | - |
814 | | - ActionListener.respondAndRelease( |
815 | | - channelListener, |
816 | | - new NodeQueryResponse(mergeResult, results, queryPhaseResultConsumer.topDocsStats) |
817 | | - ); |
818 | 819 | } |
| 820 | + ActionListener.respondAndRelease(channelListener, new BytesTransportResponse(new ReleasableBytesReference(out.bytes(), out))); |
819 | 821 | } |
820 | 822 |
821 | 823 | private void handleMergeFailure(Exception e, ChannelActionListener<TransportResponse> channelListener) { |
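The success/finally dance above is the interesting part: the RecyclerBytesStreamOutput is closed locally only if serialization fails, while on success its pages stay alive and are released by the BytesTransportResponse (through the ReleasableBytesReference) once the bytes have been sent. A minimal self-contained sketch of that ownership-transfer pattern, assuming nothing beyond plain java.io (the names here are illustrative, not Elasticsearch API):

```java
import java.io.Closeable;
import java.io.IOException;

// Illustrative only: the buffer is released in `finally` on failure, but on success it is
// returned to the caller, which becomes responsible for closing it after the response is sent.
final class TransferOnSuccess {

    interface Writer {
        void writeAll() throws IOException;
    }

    static <T extends Closeable> T serializeOrRelease(T buffer, Writer writer) throws IOException {
        boolean success = false;
        try {
            writer.writeAll();   // may throw; the buffer is still owned by this method
            success = true;
            return buffer;       // ownership transferred to the caller / response object
        } finally {
            if (success == false) {
                buffer.close();  // release only when serialization did not complete
            }
        }
    }
}
```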