diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java
index 97da362eebe82..ad44f19a14079 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java
@@ -2160,6 +2160,31 @@ public void testLongSortOptimizationCorrectResults() {
         );
     }
 
+    public void testSortMixedFieldTypesSeveralDocs() {
+        assertAcked(
+            prepareCreate("index_long").setMapping("foo", "type=long"),
+            prepareCreate("index_double").setMapping("foo", "type=double")
+        );
+
+        List<IndexRequestBuilder> builders = new ArrayList<>();
+
+        for (int i = 0; i < 10; i++) {
+            builders.add(prepareIndex("index_long").setId(String.valueOf(i)).setSource("foo", i));
+            builders.add(prepareIndex("index_double").setId(String.valueOf(i)).setSource("foo", i));
+        }
+        indexRandom(true, false, builders);
+
+        String errMsg = "Can't sort on field [foo]; the field has incompatible sort types";
+
+        { // mixing long and double types is not allowed
+            SearchPhaseExecutionException exc = expectThrows(
+                SearchPhaseExecutionException.class,
+                prepareSearch("index_long", "index_double").addSort(new FieldSortBuilder("foo")).setSize(20)
+            );
+            assertThat(exc.getCause().toString(), containsString(errMsg));
+        }
+    }
+
     public void testSortMixedFieldTypes() throws IOException {
         assertAcked(
             prepareCreate("index_long").setMapping("foo", "type=long"),
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java
index a8f22eb1cc572..75a63acd3cfd1 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java
@@ -462,6 +462,7 @@ protected void doRun(Map shardIndexMap) {
                 }
             }
         }
+        final AtomicBoolean phaseFailureEncountered = new AtomicBoolean(false);
         perNodeQueries.forEach((routing, request) -> {
             if (request.shards.size() == 1) {
                 executeAsSingleRequest(routing, request.shards.getFirst());
             }
@@ -483,6 +484,7 @@ protected void doRun(Map shardIndexMap) {
             }
             searchTransportService.transportService()
                 .sendChildRequest(connection, NODE_SEARCH_ACTION_NAME, request, task, new TransportResponseHandler<NodeQueryResponse>() {
+                    @Override
                     public NodeQueryResponse read(StreamInput in) throws IOException {
                         return new NodeQueryResponse(in);
                     }
@@ -533,7 +535,13 @@ public void handleException(TransportException e) {
                         if (results instanceof QueryPhaseResultConsumer queryPhaseResultConsumer) {
                             queryPhaseResultConsumer.failure.compareAndSet(null, cause);
                         }
-                        onPhaseFailure(getName(), "", cause);
+                        if (phaseFailureEncountered.compareAndSet(false, true)) {
+                            logger.debug("Raising phase failure for " + cause + " while executing search on node " + routing.nodeId());
+                            onPhaseFailure(getName(), "", cause);
+                        } else {
+                            // we already failed the phase, ignore any additional failures and just log them if debug enabled
+                            logger.debug("Ignoring additional phase failure for " + cause + " from search on node " + routing.nodeId());
+                        }
                     }
                 }
             });
@@ -541,7 +549,7 @@ public void handleException(TransportException e) {
 
     private void executeWithoutBatching(CanMatchPreFilterSearchPhase.SendingTarget targetNode, NodeQueryRequest request) {
-        for (ShardToQuery shard : request.shards) {
+        for (SearchQueryThenFetchAsyncAction.ShardToQuery shard : request.shards) {
             executeAsSingleRequest(targetNode, shard);
         }
     }
 
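
Note: the following is a minimal, standalone sketch (not Elasticsearch code) of the compare-and-set guard introduced above, illustrating why only the first of several concurrent per-node failures escalates to a phase failure while later ones are merely logged. All names in it (PhaseFailureDemo, onNodeFailure, the node ids) are hypothetical.

// Standalone sketch of the "first failure wins" pattern: an AtomicBoolean guarded by
// compareAndSet ensures that, out of many concurrent failure callbacks, exactly one
// caller runs the terminal failure path; the rest fall through to a log-only branch.
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

public class PhaseFailureDemo {

    private final AtomicBoolean phaseFailureEncountered = new AtomicBoolean(false);
    private final AtomicInteger phaseFailures = new AtomicInteger();

    void onNodeFailure(String nodeId, Exception cause) {
        if (phaseFailureEncountered.compareAndSet(false, true)) {
            // only the first failing node reaches this branch
            phaseFailures.incrementAndGet();
            System.out.println("raising phase failure for node " + nodeId + ": " + cause);
        } else {
            // every later failure is ignored apart from logging
            System.out.println("ignoring additional failure from node " + nodeId);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        PhaseFailureDemo demo = new PhaseFailureDemo();
        int nodes = 8;
        CountDownLatch start = new CountDownLatch(1);
        CountDownLatch done = new CountDownLatch(nodes);
        for (int i = 0; i < nodes; i++) {
            final String nodeId = "node-" + i;
            new Thread(() -> {
                try {
                    start.await();
                    demo.onNodeFailure(nodeId, new RuntimeException("boom"));
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    done.countDown();
                }
            }).start();
        }
        start.countDown();
        done.await();
        // exactly one of the concurrent failures was escalated
        System.out.println("phase failures raised: " + demo.phaseFailures.get());
    }
}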