@@ -24,6 +24,7 @@ import (
2424 "math"
2525 "os"
2626 "path/filepath"
27+ "slices"
2728 "sort"
2829 "strconv"
2930 "strings"
@@ -381,11 +382,11 @@ type flushable struct {
381382
382383// number of workers which, in parallel, perform an in-memory merge of the
383384// segments followed by a flush operation.
384- var DefaultNumPersisterWorkers = 8
385+ var DefaultNumPersisterWorkers = 1
385386
386387// maximum size of data that a single worker is allowed to perform the in-memory
387388// merge operation.
388- var DefaultMaxSizeInMemoryMerge = 200 * 1024 * 1024
389+ var DefaultMaxSizeInMemoryMerge = 0
389390
390391func legacyFlushBehaviour (maxSizeInMemoryMerge , numPersisterWorkers int ) bool {
391392 // DefaultMaxSizeInMemoryMerge = 0 is a special value to preserve the legacy
@@ -432,27 +433,20 @@ func (s *Scorch) persistSnapshotMaybeMerge(snapshot *IndexSnapshot, po *persiste
432433 // constructs a flushSet where each flushable object contains a set of segments
433434 // to be merged and flushed out to disk.
434435 for i , snapshot := range snapshot .segment {
435- if totSize >= po .MaxSizeInMemoryMerge {
436- if len (sbs ) >= DefaultMinSegmentsForInMemoryMerge {
437- numSegsToFlushOut += len (sbs )
438- val := & flushable {
439- segments : make ([]segment.Segment , len (sbs )),
440- drops : make ([]* roaring.Bitmap , len (sbsDrops )),
441- sbIdxs : make ([]int , len (sbsIndexes )),
442- totDocs : totDocs ,
443- }
444- copy (val .segments , sbs )
445- copy (val .drops , sbsDrops )
446- copy (val .sbIdxs , sbsIndexes )
447- flushSet = append (flushSet , val )
448-
449- oldSegIdxs = append (oldSegIdxs , sbsIndexes ... )
450- sbs = sbs [:0 ]
451- sbsDrops = sbsDrops [:0 ]
452- sbsIndexes = sbsIndexes [:0 ]
453- totSize = 0
454- totDocs = 0
436+ if totSize >= po .MaxSizeInMemoryMerge &&
437+ len (sbs ) >= DefaultMinSegmentsForInMemoryMerge {
438+ numSegsToFlushOut += len (sbs )
439+ val := & flushable {
440+ segments : slices .Clone (sbs ),
441+ drops : slices .Clone (sbsDrops ),
442+ sbIdxs : slices .Clone (sbsIndexes ),
443+ totDocs : totDocs ,
455444 }
445+ flushSet = append (flushSet , val )
446+ oldSegIdxs = append (oldSegIdxs , sbsIndexes ... )
447+
448+ sbs , sbsDrops , sbsIndexes = sbs [:0 ], sbsDrops [:0 ], sbsIndexes [:0 ]
449+ totSize , totDocs = 0 , 0
456450 }
457451
458452 if len (flushSet ) >= int (po .NumPersisterWorkers ) {
@@ -471,22 +465,13 @@ func (s *Scorch) persistSnapshotMaybeMerge(snapshot *IndexSnapshot, po *persiste
471465 if len (flushSet ) < po .NumPersisterWorkers {
472466 numSegsToFlushOut += len (sbs )
473467 val := & flushable {
474- segments : make ([]segment. Segment , len (sbs ) ),
475- drops : make ([] * roaring. Bitmap , len (sbsDrops ) ),
476- sbIdxs : make ([] int , len ( sbsIndexes ) ),
468+ segments : slices . Clone (sbs ),
469+ drops : slices . Clone (sbsDrops ),
470+ sbIdxs : slices . Clone ( sbsIndexes ),
477471 totDocs : totDocs ,
478472 }
479- copy (val .segments , sbs )
480- copy (val .drops , sbsDrops )
481- copy (val .sbIdxs , sbsIndexes )
482473 flushSet = append (flushSet , val )
483-
484474 oldSegIdxs = append (oldSegIdxs , sbsIndexes ... )
485- sbs = sbs [:0 ]
486- sbsDrops = sbsDrops [:0 ]
487- sbsIndexes = sbsIndexes [:0 ]
488- totSize = 0
489- totDocs = 0
490475 }
491476 }
492477
0 commit comments