@@ -699,12 +699,12 @@ static void registerNodeSearchAction(
         TransportActionProxy.registerProxyAction(transportService, NODE_SEARCH_ACTION_NAME, true, NodeQueryResponse::new);
     }
 
-    private static void releaseLocalContext(SearchService searchService, SearchRequest searchRequest, SearchPhaseResult result) {
+    private static void releaseLocalContext(SearchService searchService, NodeQueryRequest request, SearchPhaseResult result) {
         var phaseResult = result.queryResult() != null ? result.queryResult() : result.rankFeatureResult();
         if (phaseResult != null
             && phaseResult.hasSearchContext()
-            && searchRequest.scroll() == null
-            && isPartOfPIT(searchRequest, phaseResult.getContextId()) == false) {
+            && request.searchRequest.scroll() == null
+            && isPartOfPIT(request.searchRequest, phaseResult.getContextId()) == false) {
             searchService.freeReaderContext(phaseResult.getContextId());
         }
     }
@@ -752,7 +752,7 @@ private static ShardSearchRequest buildShardSearchRequest(
 
     private static void executeShardTasks(QueryPerNodeState state) {
         int idx;
-        final NodeQueryRequest nodeQueryRequest = state.nodeQueryRequest;
+        final NodeQueryRequest nodeQueryRequest = state.searchRequest;
         var shards = nodeQueryRequest.shards;
         final int totalShardCount = shards.size();
         while ((idx = state.currentShardIndex.getAndIncrement()) < totalShardCount) {
@@ -842,7 +842,7 @@ private static final class QueryPerNodeState {
 
         private final AtomicInteger currentShardIndex = new AtomicInteger();
         private final QueryPhaseResultConsumer queryPhaseResultConsumer;
-        private final NodeQueryRequest nodeQueryRequest;
+        private final NodeQueryRequest searchRequest;
         private final IntUnaryOperator shardsToQuery;
         private final CancellableTask task;
         private final ConcurrentHashMap<Integer, Exception> failures = new ConcurrentHashMap<>();
@@ -857,18 +857,18 @@ private static final class QueryPerNodeState {
 
         private QueryPerNodeState(
             QueryPhaseResultConsumer queryPhaseResultConsumer,
-            NodeQueryRequest nodeQueryRequest,
+            NodeQueryRequest searchRequest,
             IntUnaryOperator shardsToQuery,
             CancellableTask task,
             TransportChannel channel,
             Dependencies dependencies,
             @Nullable ShardSearchRequest[] shardSearchRequests
         ) {
             this.queryPhaseResultConsumer = queryPhaseResultConsumer;
-            this.nodeQueryRequest = nodeQueryRequest;
+            this.searchRequest = searchRequest;
             this.shardsToQuery = shardsToQuery;
-            this.trackTotalHitsUpTo = nodeQueryRequest.searchRequest.resolveTrackTotalHitsUpTo();
-            this.topDocsSize = getTopDocsSize(nodeQueryRequest.searchRequest);
+            this.trackTotalHitsUpTo = searchRequest.searchRequest.resolveTrackTotalHitsUpTo();
+            this.topDocsSize = getTopDocsSize(searchRequest.searchRequest);
             this.task = task;
             this.countDown = new CountDown(queryPhaseResultConsumer.getNumShards());
             this.channel = channel;
@@ -902,11 +902,11 @@ void onShardDone() {
             // translate shard indices to those on the coordinator so that it can interpret the merge result without adjustments,
             // also collect the set of indices that may be part of a subsequent fetch operation here so that we can release all other
             // indices without a roundtrip to the coordinating node
-            final BitSet relevantShardIndices = new BitSet(nodeQueryRequest.shards.size());
+            final BitSet relevantShardIndices = new BitSet(searchRequest.shards.size());
             if (mergeResult.reducedTopDocs() != null) {
                 for (ScoreDoc scoreDoc : mergeResult.reducedTopDocs().scoreDocs) {
                     final int localIndex = scoreDoc.shardIndex;
-                    scoreDoc.shardIndex = nodeQueryRequest.shards.get(localIndex).shardIndex;
+                    scoreDoc.shardIndex = searchRequest.shards.get(localIndex).shardIndex;
                     relevantShardIndices.set(localIndex);
                 }
             }
@@ -945,8 +945,8 @@ private void maybeFreeContext(SearchPhaseResult result, BitSet relevantShardIndi
                 && relevantShardIndices.get(q.getShardIndex()) == false
                 && q.hasSuggestHits() == false
                 && q.getRankShardResult() == null
-                && nodeQueryRequest.searchRequest.scroll() == null
-                && isPartOfPIT(nodeQueryRequest.searchRequest, q.getContextId()) == false) {
+                && searchRequest.searchRequest.scroll() == null
+                && isPartOfPIT(searchRequest.searchRequest, q.getContextId()) == false) {
                 if (dependencies.searchService.freeReaderContext(q.getContextId())) {
                     q.clearContextId();
                 }
@@ -955,9 +955,7 @@ && isPartOfPIT(nodeQueryRequest.searchRequest, q.getContextId()) == false) {
 
         private void handleMergeFailure(Exception e, ChannelActionListener<TransportResponse> channelListener) {
             queryPhaseResultConsumer.getSuccessfulResults()
-                .forEach(
-                    searchPhaseResult -> releaseLocalContext(dependencies.searchService, nodeQueryRequest.searchRequest, searchPhaseResult)
-                );
+                .forEach(searchPhaseResult -> releaseLocalContext(dependencies.searchService, searchRequest, searchPhaseResult));
             channelListener.onFailure(e);
         }
 
@@ -967,7 +965,7 @@ void consumeResult(QuerySearchResult queryResult) {
             // TODO: dry up the bottom sort collector with the coordinator side logic in the top-level class here
             if (queryResult.isNull() == false
                 // disable sort optims for scroll requests because they keep track of the last bottom doc locally (per shard)
-                && nodeQueryRequest.searchRequest.scroll() == null
+                && searchRequest.searchRequest.scroll() == null
                 // top docs are already consumed if the query was cancelled or in error.
                 && queryResult.hasConsumedTopDocs() == false
                 && queryResult.topDocs() != null
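A minimal stand-alone sketch of the call shape introduced above, assuming simplified stand-in types (ReleaseSketch, NodeRequest, PhaseResult, and ReaderContexts are hypothetical, not Elasticsearch classes): the release helper now takes the node-level request and reads the nested search request from it, instead of receiving the SearchRequest directly.

// Hypothetical, simplified analogue of the pattern in the diff; not the actual Elasticsearch API.
import java.util.HashSet;
import java.util.Set;

final class ReleaseSketch {

    // Stand-ins for SearchRequest, NodeQueryRequest, SearchPhaseResult, and the reader-context registry.
    record SearchRequest(boolean scroll) {}
    record NodeRequest(SearchRequest searchRequest) {}
    record PhaseResult(Long contextId) {}

    static final class ReaderContexts {
        private final Set<Long> open = new HashSet<>(Set.of(1L, 2L));

        // Returns true if the context was still open and has now been released.
        boolean free(long id) {
            return open.remove(id);
        }
    }

    // Mirrors the new signature: pass the node-level request and dereference its nested search request.
    static void releaseLocalContext(ReaderContexts contexts, NodeRequest request, PhaseResult result) {
        if (result.contextId() != null && request.searchRequest().scroll() == false) {
            contexts.free(result.contextId());
        }
    }

    public static void main(String[] args) {
        var contexts = new ReaderContexts();
        var request = new NodeRequest(new SearchRequest(false));
        releaseLocalContext(contexts, request, new PhaseResult(1L));
        System.out.println("context 1 still open: " + contexts.free(1L)); // false: already released
    }
}

Passing the wrapper keeps the helper aligned with the per-node request object that this code path already carries, which is presumably why the hunks above accept the doubled searchRequest.searchRequest dereference.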