-
Notifications
You must be signed in to change notification settings - Fork 25.6k
Use new source loader when lower docId is accessed #128320
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 2 commits
332b443
8d27a83
119fee3
dd8a0c8
2609ed2
25bc848
d4e7062
267dc73
0f1b160
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,5 @@ | ||
| pr: 128320 | ||
| summary: Use new source loader when lower `docId` is accessed | ||
| area: Codec | ||
| type: bug | ||
| issues: [] |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -340,4 +340,108 @@ public void testLogsdbDefaultWithRecoveryUseSyntheticSource() throws IOException | |
| assertNull(settings.get("index.mapping.source.mode")); | ||
| assertEquals("true", settings.get(IndexSettings.LOGSDB_SORT_ON_HOST_NAME.getKey())); | ||
| } | ||
|
|
||
| public void testSyntheticSourceRuntimeFieldQueries() throws IOException { | ||
| String mappings = """ | ||
| { | ||
| "runtime": { | ||
| "message_length": { | ||
| "type": "long" | ||
| } | ||
| }, | ||
| "dynamic": false, | ||
| "properties": { | ||
| "@timestamp": { | ||
| "type": "date" | ||
| } | ||
| } | ||
| } | ||
| """; | ||
| String indexName = "test-foo"; | ||
| createIndex(indexName, Settings.builder().put("index.mode", "logsdb").build(), mappings); | ||
|
|
||
| int numDocs = 100_000; | ||
|
||
| var sb = new StringBuilder(); | ||
| var now = Instant.now(); | ||
|
|
||
| for (int i = 0; i < numDocs; i++) { | ||
| String msg = randomAlphaOfLength(20); | ||
| String messageLength = Integer.toString(msg.length()); | ||
| sb.append("{ \"create\": {} }").append('\n'); | ||
| sb.append(""" | ||
| {"@timestamp": "$now", "message_length": $l} | ||
| """.replace("$now", formatInstant(now)).replace("$l", messageLength)); | ||
| sb.append('\n'); | ||
| if (i != numDocs - 1) { | ||
| now = now.plusSeconds(1); | ||
| } | ||
|
|
||
| if (i % 1000 == 0) { | ||
| var bulkRequest = new Request("POST", "/" + indexName + "/_bulk"); | ||
| bulkRequest.setJsonEntity(sb.toString()); | ||
| var bulkResponse = client().performRequest(bulkRequest); | ||
| var bulkResponseBody = responseAsMap(bulkResponse); | ||
| assertThat(bulkResponseBody, Matchers.hasEntry("errors", false)); | ||
| sb = new StringBuilder(); | ||
| } | ||
| } | ||
|
|
||
| var bulkRequest = new Request("POST", "/" + indexName + "/_bulk"); | ||
| bulkRequest.setJsonEntity(sb.toString()); | ||
| bulkRequest.addParameter("refresh", "true"); | ||
| var bulkResponse = client().performRequest(bulkRequest); | ||
| var bulkResponseBody = responseAsMap(bulkResponse); | ||
| assertThat(bulkResponseBody, Matchers.hasEntry("errors", false)); | ||
|
|
||
| var forceMergeRequest = new Request("POST", "/" + indexName + "/_forcemerge"); | ||
| var forceMergeResponse = client().performRequest(forceMergeRequest); | ||
| assertOK(forceMergeResponse); | ||
|
|
||
| var searchRequest = new Request("POST", "/" + indexName + "/_search"); | ||
| searchRequest.setJsonEntity(""" | ||
| { | ||
| "size": 1, | ||
| "query": { | ||
| "bool": { | ||
| "should": [ | ||
| { | ||
| "range": { | ||
| "message_length": { | ||
| "gte": 1, | ||
| "lt": 900000 | ||
| } | ||
| } | ||
| }, | ||
| { | ||
| "range": { | ||
| "message_length": { | ||
| "gte": 900000, | ||
| "lt": 1000000 | ||
| } | ||
| } | ||
| } | ||
| ], | ||
| "minimum_should_match": "1", | ||
| "must_not": [ | ||
| { | ||
| "range": { | ||
| "message_length": { | ||
| "lt": 0 | ||
| } | ||
| } | ||
| } | ||
| ] | ||
| } | ||
| } | ||
| } | ||
| """); | ||
| var searchResponse = client().performRequest(searchRequest); | ||
| assertOK(searchResponse); | ||
| var searchResponseBody = responseAsMap(searchResponse); | ||
| var shardsHeader = (Map<?, ?>) searchResponseBody.get("_shards"); | ||
| assertThat(shardsHeader.get("failed"), equalTo(0)); | ||
| assertThat(shardsHeader.get("successful"), equalTo(1)); | ||
| assertThat(shardsHeader.get("skipped"), equalTo(0)); | ||
| logger.info("searchResponse: {}", searchResponseBody); | ||
|
||
| } | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Maybe also incorporate the following into this comment:
with both synthetic and stored source the docid can go backwards. This isn't a problem for stored fields (somehow it validates when the docid goes backwards and doesn't run into EOF-like errors), but it is for doc values (which get used with synthetic source).
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The issue here seems to arise when multiple clauses in a query reference runtime fields in the same request, no? So it's not only about using the same runtime field twice.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yes I think that is a more accurate way to describe the issue. I confirmed that, unsurprisingly, aggregating twice on the same runtime field does indeed work fine as is. So this is specific to there being 2+ query clauses referencing the same runtime field.