|
9 | 9 |
|
10 | 10 | package org.elasticsearch.action.search; |
11 | 11 |
|
12 | | -import org.apache.lucene.index.Term; |
13 | | -import org.apache.lucene.search.CollectionStatistics; |
14 | 12 | import org.apache.lucene.search.FieldDoc; |
15 | 13 | import org.apache.lucene.search.ScoreDoc; |
16 | 14 | import org.apache.lucene.search.Sort; |
17 | 15 | import org.apache.lucene.search.SortField; |
18 | 16 | import org.apache.lucene.search.SortedNumericSortField; |
19 | 17 | import org.apache.lucene.search.SortedSetSortField; |
20 | | -import org.apache.lucene.search.TermStatistics; |
21 | 18 | import org.apache.lucene.search.TopDocs; |
22 | 19 | import org.apache.lucene.search.TopFieldDocs; |
23 | 20 | import org.apache.lucene.search.TotalHits; |
24 | 21 | import org.apache.lucene.search.TotalHits.Relation; |
25 | | -import org.apache.lucene.util.SetOnce; |
26 | 22 | import org.elasticsearch.common.breaker.CircuitBreaker; |
27 | 23 | import org.elasticsearch.common.io.stream.DelayableWriteable; |
28 | 24 | import org.elasticsearch.common.lucene.Lucene; |
|
42 | 38 | import org.elasticsearch.search.aggregations.AggregatorFactories; |
43 | 39 | import org.elasticsearch.search.aggregations.InternalAggregations; |
44 | 40 | import org.elasticsearch.search.builder.SearchSourceBuilder; |
45 | | -import org.elasticsearch.search.dfs.AggregatedDfs; |
46 | | -import org.elasticsearch.search.dfs.DfsKnnResults; |
47 | | -import org.elasticsearch.search.dfs.DfsSearchResult; |
48 | 41 | import org.elasticsearch.search.fetch.FetchSearchResult; |
49 | 42 | import org.elasticsearch.search.internal.SearchContext; |
50 | 43 | import org.elasticsearch.search.profile.SearchProfileQueryPhaseResult; |
@@ -84,97 +77,6 @@ public SearchPhaseController( |
84 | 77 | this.requestToAggReduceContextBuilder = requestToAggReduceContextBuilder; |
85 | 78 | } |
86 | 79 |
|
87 | | - public static AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) { |
88 | | - Map<Term, TermStatistics> termStatistics = new HashMap<>(); |
89 | | - Map<String, CollectionStatistics> fieldStatistics = new HashMap<>(); |
90 | | - long aggMaxDoc = 0; |
91 | | - for (DfsSearchResult lEntry : results) { |
92 | | - final Term[] terms = lEntry.terms(); |
93 | | - final TermStatistics[] stats = lEntry.termStatistics(); |
94 | | - assert terms.length == stats.length; |
95 | | - for (int i = 0; i < terms.length; i++) { |
96 | | - assert terms[i] != null; |
97 | | - if (stats[i] == null) { |
98 | | - continue; |
99 | | - } |
100 | | - TermStatistics existing = termStatistics.get(terms[i]); |
101 | | - if (existing != null) { |
102 | | - assert terms[i].bytes().equals(existing.term()); |
103 | | - termStatistics.put( |
104 | | - terms[i], |
105 | | - new TermStatistics( |
106 | | - existing.term(), |
107 | | - existing.docFreq() + stats[i].docFreq(), |
108 | | - existing.totalTermFreq() + stats[i].totalTermFreq() |
109 | | - ) |
110 | | - ); |
111 | | - } else { |
112 | | - termStatistics.put(terms[i], stats[i]); |
113 | | - } |
114 | | - |
115 | | - } |
116 | | - |
117 | | - assert lEntry.fieldStatistics().containsKey(null) == false; |
118 | | - for (var entry : lEntry.fieldStatistics().entrySet()) { |
119 | | - String key = entry.getKey(); |
120 | | - CollectionStatistics value = entry.getValue(); |
121 | | - if (value == null) { |
122 | | - continue; |
123 | | - } |
124 | | - assert key != null; |
125 | | - CollectionStatistics existing = fieldStatistics.get(key); |
126 | | - if (existing != null) { |
127 | | - CollectionStatistics merged = new CollectionStatistics( |
128 | | - key, |
129 | | - existing.maxDoc() + value.maxDoc(), |
130 | | - existing.docCount() + value.docCount(), |
131 | | - existing.sumTotalTermFreq() + value.sumTotalTermFreq(), |
132 | | - existing.sumDocFreq() + value.sumDocFreq() |
133 | | - ); |
134 | | - fieldStatistics.put(key, merged); |
135 | | - } else { |
136 | | - fieldStatistics.put(key, value); |
137 | | - } |
138 | | - } |
139 | | - aggMaxDoc += lEntry.maxDoc(); |
140 | | - } |
141 | | - return new AggregatedDfs(termStatistics, fieldStatistics, aggMaxDoc); |
142 | | - } |
143 | | - |
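For reference, the removed aggregateDfs combines the distributed-frequency statistics reported by each shard by summing the per-term and per-field counts. A minimal sketch of that merge using plain Lucene types (an illustration of the idea, not the Elasticsearch implementation; the helper names are hypothetical):

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.TermStatistics;

class DfsMergeSketch {
    // Statistics for the same term from two shards combine by summing counts.
    static TermStatistics mergeTermStats(Term term, TermStatistics a, TermStatistics b) {
        return new TermStatistics(
            term.bytes(),
            a.docFreq() + b.docFreq(),            // docs containing the term, across both shards
            a.totalTermFreq() + b.totalTermFreq() // total occurrences, across both shards
        );
    }

    // Field-level statistics combine the same way, field by field.
    static CollectionStatistics mergeFieldStats(String field, CollectionStatistics a, CollectionStatistics b) {
        return new CollectionStatistics(
            field,
            a.maxDoc() + b.maxDoc(),
            a.docCount() + b.docCount(),
            a.sumTotalTermFreq() + b.sumTotalTermFreq(),
            a.sumDocFreq() + b.sumDocFreq()
        );
    }
}
```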
144 | | - public static List<DfsKnnResults> mergeKnnResults(SearchRequest request, List<DfsSearchResult> dfsSearchResults) { |
145 | | - if (request.hasKnnSearch() == false) { |
146 | | - return null; |
147 | | - } |
148 | | - SearchSourceBuilder source = request.source(); |
149 | | - List<List<TopDocs>> topDocsLists = new ArrayList<>(source.knnSearch().size()); |
150 | | - List<SetOnce<String>> nestedPath = new ArrayList<>(source.knnSearch().size()); |
151 | | - for (int i = 0; i < source.knnSearch().size(); i++) { |
152 | | - topDocsLists.add(new ArrayList<>()); |
153 | | - nestedPath.add(new SetOnce<>()); |
154 | | - } |
155 | | - |
156 | | - for (DfsSearchResult dfsSearchResult : dfsSearchResults) { |
157 | | - if (dfsSearchResult.knnResults() != null) { |
158 | | - for (int i = 0; i < dfsSearchResult.knnResults().size(); i++) { |
159 | | - DfsKnnResults knnResults = dfsSearchResult.knnResults().get(i); |
160 | | - ScoreDoc[] scoreDocs = knnResults.scoreDocs(); |
161 | | - TotalHits totalHits = new TotalHits(scoreDocs.length, Relation.EQUAL_TO); |
162 | | - TopDocs shardTopDocs = new TopDocs(totalHits, scoreDocs); |
163 | | - setShardIndex(shardTopDocs, dfsSearchResult.getShardIndex()); |
164 | | - topDocsLists.get(i).add(shardTopDocs); |
165 | | - nestedPath.get(i).trySet(knnResults.getNestedPath()); |
166 | | - } |
167 | | - } |
168 | | - } |
169 | | - |
170 | | - List<DfsKnnResults> mergedResults = new ArrayList<>(source.knnSearch().size()); |
171 | | - for (int i = 0; i < source.knnSearch().size(); i++) { |
172 | | - TopDocs mergedTopDocs = TopDocs.merge(source.knnSearch().get(i).k(), topDocsLists.get(i).toArray(new TopDocs[0])); |
173 | | - mergedResults.add(new DfsKnnResults(nestedPath.get(i).get(), mergedTopDocs.scoreDocs)); |
174 | | - } |
175 | | - return mergedResults; |
176 | | - } |
177 | | - |
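The removed mergeKnnResults reduces each kNN query's per-shard hits to a single global top k via TopDocs.merge. A hedged sketch of that reduction for one kNN query (helper name hypothetical; direct shardIndex stamping stands in for setShardIndex above):

```java
import java.util.List;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;

class KnnMergeSketch {
    // Wrap each shard's ScoreDocs in a TopDocs, stamp the shard index
    // (TopDocs.merge uses it for tie-breaking), then merge down to the top k.
    static ScoreDoc[] mergeShardKnnHits(int k, List<ScoreDoc[]> perShardHits) {
        TopDocs[] shardTopDocs = new TopDocs[perShardHits.size()];
        for (int shard = 0; shard < perShardHits.size(); shard++) {
            ScoreDoc[] scoreDocs = perShardHits.get(shard);
            for (ScoreDoc scoreDoc : scoreDocs) {
                scoreDoc.shardIndex = shard;
            }
            shardTopDocs[shard] = new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs);
        }
        return TopDocs.merge(k, shardTopDocs).scoreDocs;
    }
}
```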
178 | 80 | /** |
179 | 81 | * Returns a score doc array of top N search docs across all shards, followed by top suggest docs for each |
180 | 82 | * named completion suggestion across all shards. If more than one named completion suggestion is specified in the |
@@ -496,38 +398,6 @@ private static SearchHits getHits( |
496 | 398 | ); |
497 | 399 | } |
498 | 400 |
|
499 | | - /** |
500 | | - * Reduces the given query results and consumes all aggregations and profile results. |
501 | | - * @param queryResults a list of non-null query shard results |
502 | | - */ |
503 | | - static ReducedQueryPhase reducedScrollQueryPhase(Collection<? extends SearchPhaseResult> queryResults) { |
504 | | - AggregationReduceContext.Builder aggReduceContextBuilder = new AggregationReduceContext.Builder() { |
505 | | - @Override |
506 | | - public AggregationReduceContext forPartialReduction() { |
507 | | - throw new UnsupportedOperationException("Scroll requests don't have aggs"); |
508 | | - } |
509 | | - |
510 | | - @Override |
511 | | - public AggregationReduceContext forFinalReduction() { |
512 | | - throw new UnsupportedOperationException("Scroll requests don't have aggs"); |
513 | | - } |
514 | | - }; |
515 | | - final TopDocsStats topDocsStats = new TopDocsStats(SearchContext.TRACK_TOTAL_HITS_ACCURATE); |
516 | | - final List<TopDocs> topDocs = new ArrayList<>(); |
517 | | - for (SearchPhaseResult sortedResult : queryResults) { |
518 | | - QuerySearchResult queryResult = sortedResult.queryResult(); |
519 | | - final TopDocsAndMaxScore td = queryResult.consumeTopDocs(); |
520 | | - assert td != null; |
521 | | - topDocsStats.add(td, queryResult.searchTimedOut(), queryResult.terminatedEarly()); |
522 | | - // make sure we set the shard index before we add it - the consumer didn't do that yet |
523 | | - if (td.topDocs.scoreDocs.length > 0) { |
524 | | - setShardIndex(td.topDocs, queryResult.getShardIndex()); |
525 | | - topDocs.add(td.topDocs); |
526 | | - } |
527 | | - } |
528 | | - return reducedQueryPhase(queryResults, null, topDocs, topDocsStats, 0, true, aggReduceContextBuilder, null, true); |
529 | | - } |
530 | | - |
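The removed scroll reduce stamps shard indices on each shard's top docs and folds hit counts into TopDocsStats before delegating to reducedQueryPhase. A simplified sketch of the count accumulation only (assuming Lucene's TotalHits exposes public value/relation fields, as in Lucene 9.x; this is not Elasticsearch's TopDocsStats):

```java
import java.util.List;
import org.apache.lucene.search.TotalHits;

class TotalHitsSketch {
    // Sum per-shard totals; if any shard reports only a lower bound,
    // the merged relation must be a lower bound as well.
    static TotalHits accumulate(List<TotalHits> shardTotals) {
        long total = 0;
        TotalHits.Relation relation = TotalHits.Relation.EQUAL_TO;
        for (TotalHits shardTotal : shardTotals) {
            total += shardTotal.value;
            if (shardTotal.relation == TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO) {
                relation = TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO;
            }
        }
        return new TotalHits(total, relation);
    }
}
```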
531 | 401 | /** |
532 | 402 | * Reduces the given query results and consumes all aggregations and profile results. |
533 | 403 | * @param queryResults a list of non-null query shard results |
|