Skip to content

Commit 093a14f

Browse files
committed
Remove NOCOMMIT markers (replace with TODOs)
1 parent 5284db0 commit 093a14f

File tree

2 files changed

+6
-15
lines changed

2 files changed

+6
-15
lines changed

x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/read/ValuesFromManyReader.java

Lines changed: 5 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@
1919
import org.elasticsearch.search.fetch.StoredFieldsSpec;
2020

2121
import java.io.IOException;
22-
import java.util.Arrays;
2322

2423
/**
2524
* Loads values from a many leaves. Much less efficient than {@link ValuesFromSingleReader}.
@@ -59,6 +58,7 @@ class Run implements Releasable {
5958
}
6059

6160
void run(int offset) throws IOException {
61+
assert offset == 0; // TODO allow non-0 offset to support splitting pages
6262
for (int f = 0; f < operator.fields.length; f++) {
6363
/*
6464
* Important note: each field has a desired type, which might not match the mapped type (in the case of union-types).
@@ -97,20 +97,11 @@ void run(int offset) throws IOException {
9797
read(docs.docs().getInt(p), shard);
9898
i++;
9999
}
100-
buildBlocks(offset, i);
100+
buildBlocks();
101101
}
102102
}
103103

104-
private void buildBlocks(int offset, int end) {
105-
int[] positions = backwards;
106-
if (offset > 0 || end < positions.length) {
107-
// NOCOMMIT this doesn't make sense for shuffled arrays.
108-
/*
109-
* We're loading in ascending doc order, not ascending block position order
110-
* So we can't early terminate like this.
111-
*/
112-
positions = Arrays.copyOfRange(positions, offset, end);
113-
}
104+
private void buildBlocks() {
114105
for (int f = 0; f < target.length; f++) {
115106
for (int s = 0; s < operator.shardContexts.size(); s++) {
116107
if (builders[f][s] != null) {
@@ -120,9 +111,9 @@ private void buildBlocks(int offset, int end) {
120111
}
121112
}
122113
try (Block targetBlock = fieldTypeBuilders[f].build()) {
123-
target[f] = targetBlock.filter(positions);
114+
target[f] = targetBlock.filter(backwards);
124115
}
125-
operator.sanityCheckBlock(rowStride[f], positions.length, target[f], f);
116+
operator.sanityCheckBlock(rowStride[f], backwards.length, target[f], f);
126117
}
127118
}
128119

x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/read/ValuesFromSingleReader.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ class ValuesFromSingleReader extends ValuesReader {
4949

5050
@Override
5151
protected void load(Block[] target, int offset) throws IOException {
52-
assert offset == 0; // NOCOMMIT implement me
52+
assert offset == 0; // TODO allow non-0 offset to support splitting pages
5353
if (docs.singleSegmentNonDecreasing()) {
5454
loadFromSingleLeaf(target, new BlockLoader.Docs() {
5555
@Override

0 commit comments

Comments (0)