@@ -80,9 +80,9 @@ public static void init() throws Exception {
8080 dir = newDirectory ();
8181 IndexWriterConfig iwc = new IndexWriterConfig ();
8282 RandomIndexWriter w = new RandomIndexWriter (random (), dir , iwc );
83- // the upper bound is higher than 2048 so that in some cases we time out after the first batch of bulk scoring, but before
83+ // the upper bound is higher than 4096 so that in some cases we time out after the first batch of bulk scoring, but before
8484 // getting to the end of the first segment
85- numDocs = scaledRandomIntBetween (500 , 2500 );
85+ numDocs = scaledRandomIntBetween (500 , 4500 );
8686 for (int i = 0 ; i < numDocs ; ++i ) {
8787 Document doc = new Document ();
8888 doc .add (new StringField ("field" , Integer .toString (i ), Field .Store .NO ));
@@ -309,9 +309,9 @@ public void testBulkScorerTimeout() throws IOException {
309309 QueryPhase .executeQuery (context );
310310 assertTrue (context .queryResult ().searchTimedOut ());
311311 int firstSegmentMaxDoc = reader .leaves ().get (0 ).reader ().maxDoc ();
312- // See CancellableBulkScorer#INITIAL_INTERVAL for the source of 2048 : we always score the first
313- // batch of up to 2048 docs, and only then raise the timeout
314- assertEquals (Math .min (2048 , firstSegmentMaxDoc ), context .queryResult ().topDocs ().topDocs .totalHits .value ());
312+ // See CancellableBulkScorer#INITIAL_INTERVAL for the source of 4096: we always score the first
313+ // batch of up to 4096 docs, and only then raise the timeout
314+ assertEquals (Math .min (4096 , firstSegmentMaxDoc ), context .queryResult ().topDocs ().topDocs .totalHits .value ());
315315 assertEquals (Math .min (size , firstSegmentMaxDoc ), context .queryResult ().topDocs ().topDocs .scoreDocs .length );
316316 }
317317 }
0 commit comments