4747import  java .util .function .BiConsumer ;
4848import  java .util .function .Function ;
4949import  java .util .function .LongConsumer ;
50- import  java .util .function .Supplier ;
5150
5251import  static  org .elasticsearch .search .aggregations .InternalOrder .isKeyOrder ;
5352
@@ -296,7 +295,7 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
296295                    try  (ObjectArrayPriorityQueue <B > ordered  = buildPriorityQueue (size )) {
297296                        B  spare  = null ;
298297                        BytesKeyedBucketOrds .BucketOrdsEnum  ordsEnum  = bucketOrds .ordsEnum (owningOrd );
299-                         Supplier <B > emptyBucketBuilder  = emptyBucketBuilder (owningOrd );
298+                         BucketUpdater <B > bucketUpdater  = bucketUpdater (owningOrd );
300299                        while  (ordsEnum .next ()) {
301300                            long  docCount  = bucketDocCount (ordsEnum .ord ());
302301                            otherDocCounts .increment (ordIdx , docCount );
@@ -305,9 +304,9 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
305304                            }
306305                            if  (spare  == null ) {
307306                                checkRealMemoryCBForInternalBucket ();
308-                                 spare  = emptyBucketBuilder . get ();
307+                                 spare  = buildEmptyBucket ();
309308                            }
310-                             updateBucket (spare , ordsEnum , docCount );
309+                             bucketUpdater . updateBucket (spare , ordsEnum , docCount );
311310                            spare  = ordered .insertWithOverflow (spare );
312311                        }
313312
@@ -348,9 +347,9 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
        /**
         * Collect bucket entries for terms that matched no documents, if the
         * request requires them (zero-doc-count buckets).
         */
        abstract void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedDocs) throws IOException;
349348
        /**
         * Build an empty bucket, used as the reusable {@code spare} that is
         * repeatedly filled and offered to the priority queue while selecting
         * the top buckets.
         */
        abstract B buildEmptyBucket();
354353
355354        /** 
356355         * Build a {@link PriorityQueue} to sort the buckets. After we've 
@@ -362,7 +361,7 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
        /**
         * Build a {@link BucketUpdater} for the given owning bucket ordinal.
         * The returned updater sets the fields of a {@code spare} bucket to
         * reflect the information collected for one bucket ordinal.
         */
        abstract BucketUpdater<B> bucketUpdater(long owningBucketOrd);
366365
367366        /** 
368367         * Build an array to hold the "top" buckets for each ordinal. 
@@ -399,6 +398,10 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
        /**
         * Build an empty aggregation result, returned when there is nothing to
         * collect.
         */
        abstract R buildEmptyResult();
400399    }
401400
401+     interface  BucketUpdater <B  extends  InternalMultiBucketAggregation .InternalBucket > {
402+         void  updateBucket (B  spare , BytesKeyedBucketOrds .BucketOrdsEnum  ordsEnum , long  docCount ) throws  IOException ;
403+     }
404+ 
402405    /** 
403406     * Builds results for the standard {@code terms} aggregation. 
404407     */ 
@@ -490,8 +493,8 @@ private void collectZeroDocEntries(BinaryDocValues values, Bits liveDocs, int ma
490493        }
491494
        @Override
        StringTerms.Bucket buildEmptyBucket() {
            // Placeholder term and counts; they are overwritten by the updater
            // returned from bucketUpdater before the bucket is used.
            return new StringTerms.Bucket(new BytesRef(), 0, null, showTermDocCountError, 0, format);
        }
496499
497500        @ Override 
@@ -500,10 +503,12 @@ ObjectArrayPriorityQueue<StringTerms.Bucket> buildPriorityQueue(int size) {
500503        }
501504
        @Override
        BucketUpdater<StringTerms.Bucket> bucketUpdater(long owningBucketOrd) {
            return (spare, ordsEnum, docCount) -> {
                // Copy the term bytes into the spare bucket, then record its
                // doc count and ordinal for later sub-aggregation lookup.
                ordsEnum.readValue(spare.termBytes);
                spare.docCount = docCount;
                spare.bucketOrd = ordsEnum.ord();
            };
        }
508513
509514        @ Override 
@@ -615,9 +620,8 @@ public void collect(int doc, long owningBucketOrd) throws IOException {
615620        void  collectZeroDocEntriesIfNeeded (long  owningBucketOrd , boolean  excludeDeletedDocs ) throws  IOException  {}
616621
        @Override
        SignificantStringTerms.Bucket buildEmptyBucket() {
            // Placeholder values; filled in by the updater returned from
            // bucketUpdater before the bucket is used.
            return new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, null, format, 0);
        }
622626
623627        @ Override 
@@ -626,20 +630,20 @@ ObjectArrayPriorityQueue<SignificantStringTerms.Bucket> buildPriorityQueue(int s
626630        }
627631
        @Override
        BucketUpdater<SignificantStringTerms.Bucket> bucketUpdater(long owningBucketOrd) {
            // Look up the subset size once per owning ordinal; it is the same
            // for every candidate bucket under this ordinal.
            long subsetSize = subsetSizes.get(owningBucketOrd);
            return (spare, ordsEnum, docCount) -> {
                ordsEnum.readValue(spare.termBytes);
                spare.bucketOrd = ordsEnum.ord();
                spare.subsetDf = docCount;
                spare.supersetDf = backgroundFrequencies.freq(spare.termBytes);
                /*
                 * During shard-local down-selection we use subset/superset stats
                 * that are for this shard only. Back at the central reducer these
                 * properties will be updated with global stats.
                 */
                spare.updateScore(significanceHeuristic, subsetSize, supersetSize);
            };
        }
644648
645649        @ Override 
0 commit comments