|
49 | 49 | import java.util.HashSet; |
50 | 50 | import java.util.List; |
51 | 51 | import java.util.Set; |
52 | | -import java.util.function.Function; |
| 52 | +import java.util.function.BiConsumer; |
53 | 53 | import java.util.stream.Collectors; |
54 | 54 |
|
55 | 55 | import static org.elasticsearch.xpack.esql.core.util.StringUtils.WILDCARD; |
@@ -107,31 +107,36 @@ public static PreAnalysisResult resolveFieldNames(LogicalPlan parsed, EnrichReso |
107 | 107 | Set<String> wildcardJoinIndices = new java.util.HashSet<>(); |
108 | 108 |
|
109 | 109 | var canRemoveAliases = new Holder<>(true); |
110 | | - var needsAllFields = new Holder<>(false); |
111 | 110 |
|
112 | | - var processingLambda = new Holder<Function<LogicalPlan, Boolean>>(); |
113 | | - processingLambda.set((LogicalPlan p) -> {// go over each plan top-down |
| 111 | + var processingLambda = new Holder<BiConsumer<LogicalPlan, Holder<Boolean>>>(); |
| 112 | + processingLambda.set((LogicalPlan p, Holder<Boolean> breakEarly) -> {// go over each plan top-down |
114 | 113 | if (p instanceof Fork fork) { |
115 | | - // Early return from forEachDown. We will iterate over the children manually. |
| 114 | + // We iterate over the children manually here and end the enclosing forEachDown recursion early. |
116 | 115 | var forkRefsResult = AttributeSet.builder(); |
117 | 116 | forkRefsResult.addAll(referencesBuilder.get()); |
118 | 117 |
|
119 | | - for (var child : fork.children()) { |
| 118 | + for (var fork_child : fork.children()) { |
120 | 119 | referencesBuilder.set(AttributeSet.builder()); |
121 | | - var return_result = child.forEachDownMayReturnEarly(processingLambda.get()); |
122 | | - // No nested Forks for now... |
123 | | - assert return_result; |
| 120 | + var nested_early_return = fork_child.forEachDownMayReturnEarly(processingLambda.get()); |
| 121 | + // This assert is just for good measure. FORKs within FORKs are not yet supported. |
| 122 | + assert nested_early_return == false; |
| 123 | + |
| 124 | + // See below: no references means we should return all fields (*). |
124 | 125 | if (referencesBuilder.get().isEmpty()) { |
125 | | - needsAllFields.set(true); |
126 | | - // Early return. |
127 | | - return false; |
| 126 | + projectAll.set(true); |
| 127 | + // Return early, we'll be returning all references no matter what the remainder of the query is. |
| 128 | + breakEarly.set(true); |
| 129 | + return; |
128 | 130 | } |
129 | 131 | forkRefsResult.addAll(referencesBuilder.get()); |
130 | 132 | } |
131 | 133 |
|
132 | 134 | forkRefsResult.removeIf(attr -> attr.name().equals(Fork.FORK_FIELD)); |
133 | 135 | referencesBuilder.set(forkRefsResult); |
134 | | - return false; |
| 136 | + |
| 137 | + // Return early, we've already explored all fork branches. |
| 138 | + breakEarly.set(true); |
| 139 | + return; |
135 | 140 | } else if (p instanceof RegexExtract re) { // for Grok and Dissect |
136 | 141 | // keep the inputs needed by Grok/Dissect |
137 | 142 | referencesBuilder.get().addAll(re.input().references()); |
@@ -210,13 +215,10 @@ public static PreAnalysisResult resolveFieldNames(LogicalPlan parsed, EnrichReso |
210 | 215 | .removeIf(attr -> matchByName(attr, ne.name(), keepRefs.contains(attr) || dropWildcardRefs.contains(attr))); |
211 | 216 | }); |
212 | 217 | } |
213 | | - |
214 | | - // No early return. |
215 | | - return true; |
216 | 218 | }); |
217 | 219 | parsed.forEachDownMayReturnEarly(processingLambda.get()); |
218 | 220 |
|
219 | | - if (needsAllFields.get()) { |
| 221 | + if (projectAll.get()) { |
220 | 222 | return new PreAnalysisResult(enrichResolution, IndexResolver.ALL_FIELDS, Set.of()); |
221 | 223 | } |
222 | 224 |
|
|
0 commit comments