Skip to content

Commit 5e2cca2

Browse files
authored
fix: golangci-lint findings, part 2 (#2164)
Since I started using this project on my production workloads, I would like to contribute to the project. This second change fixes easy stuff like: - `golangci-lint run --go 1.21 ./... | grep "SA1012:"` -> __SA1012: do not pass a nil Context, even if a function permits it; pass context.TODO if you are unsure about which Context to use (staticcheck)__ - `golangci-lint run --go 1.21 ./... | grep "S1039:"` -> __S1039: unnecessary use of fmt.Sprintf (gosimple)__ - `golangci-lint run --go 1.21 ./... | grep "S1011:"` -> __S1011: should replace loop with `q.Conjuncts = append(q.Conjuncts, aq...)` (gosimple)__ - `golangci-lint run --go 1.21 ./... | grep "ineffectual assignment to err"` - `golangci-lint run --go 1.21 ./... | grep "S1005:"` -> __S1005: unnecessary assignment to the blank identifier (gosimple)__ - `golangci-lint run --go 1.21 ./... | grep "S1021:"` -> __S1021: should merge variable declaration with assignment on next line (gosimple)__ - `golangci-lint run --go 1.21 ./... | grep "Error return value of"` I hope this contribution helps to standardize the code according to the Go version used. __NOTE:__ The `test` caught an error with my changes, since a `nil` context argument is no longer used; additionally, I changed some validation in the file `search_geoshape.go`. How it was previously: ```go ... var bufPool *s2.GeoBufferPool if ctx != nil { bufPool = ctx.Value(search.GeoBufferPoolCallbackKey).(search.GeoBufferPoolCallbackFunc)() } ... ``` How it is now: ```go ... var bufPool *s2.GeoBufferPool if ctx.Value(search.GeoBufferPoolCallbackKey) != nil { bufPool = ctx.Value(search.GeoBufferPoolCallbackKey).(search.GeoBufferPoolCallbackFunc)() } ... ```
1 parent dd102de commit 5e2cca2

39 files changed

+2429
-1165
lines changed

cmd/bleve/cmd/bulk.go

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -67,8 +67,7 @@ var bulkCmd = &cobra.Command{
6767
break
6868
}
6969

70-
var doc interface{}
71-
doc = b
70+
var doc interface{} = b
7271
var err error
7372
if parseJSON {
7473
err = json.Unmarshal(b, &doc)

index/scorch/builder_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
package scorch
1616

1717
import (
18+
"context"
1819
"fmt"
1920
"os"
2021
"testing"
@@ -59,7 +60,6 @@ func TestBuilder(t *testing.T) {
5960
}
6061

6162
checkIndex(t, tmpDir, []byte("hello"), "name", 10)
62-
6363
}
6464

6565
func checkIndex(t *testing.T, path string, term []byte, field string, expectCount int) {
@@ -101,7 +101,7 @@ func checkIndex(t *testing.T, path string, term []byte, field string, expectCoun
101101
}
102102

103103
// run a search for hello
104-
tfr, err := r.TermFieldReader(nil, term, field, false, false, false)
104+
tfr, err := r.TermFieldReader(context.TODO(), term, field, false, false, false)
105105
if err != nil {
106106
t.Errorf("error accessing term field reader: %v", err)
107107
} else {

index/scorch/field_dict_test.go

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -51,14 +51,12 @@ func TestIndexFieldDict(t *testing.T) {
5151
}
5252
}()
5353

54-
var expectedCount uint64
5554
doc := document.NewDocument("1")
5655
doc.AddField(document.NewTextField("name", []uint64{}, []byte("test")))
5756
err = idx.Update(doc)
5857
if err != nil {
5958
t.Errorf("Error updating index: %v", err)
6059
}
61-
expectedCount++
6260

6361
doc = document.NewDocument("2")
6462
doc.AddField(document.NewTextFieldWithAnalyzer("name", []uint64{}, []byte("test test test"), testAnalyzer))
@@ -68,7 +66,6 @@ func TestIndexFieldDict(t *testing.T) {
6866
if err != nil {
6967
t.Errorf("Error updating index: %v", err)
7068
}
71-
expectedCount++
7269

7370
indexReader, err := idx.Reader()
7471
if err != nil {

index/scorch/persister.go

Lines changed: 30 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -240,7 +240,8 @@ OUTER:
240240
}
241241

242242
func notifyMergeWatchers(lastPersistedEpoch uint64,
243-
persistWatchers []*epochWatcher) []*epochWatcher {
243+
persistWatchers []*epochWatcher,
244+
) []*epochWatcher {
244245
var watchersNext []*epochWatcher
245246
for _, w := range persistWatchers {
246247
if w.epoch < lastPersistedEpoch {
@@ -254,8 +255,8 @@ func notifyMergeWatchers(lastPersistedEpoch uint64,
254255

255256
func (s *Scorch) pausePersisterForMergerCatchUp(lastPersistedEpoch uint64,
256257
lastMergedEpoch uint64, persistWatchers []*epochWatcher,
257-
po *persisterOptions) (uint64, []*epochWatcher) {
258-
258+
po *persisterOptions,
259+
) (uint64, []*epochWatcher) {
259260
// First, let the watchers proceed if they lag behind
260261
persistWatchers = notifyMergeWatchers(lastPersistedEpoch, persistWatchers)
261262

@@ -339,7 +340,8 @@ func (s *Scorch) parsePersisterOptions() (*persisterOptions, error) {
339340
}
340341

341342
func (s *Scorch) persistSnapshot(snapshot *IndexSnapshot,
342-
po *persisterOptions) error {
343+
po *persisterOptions,
344+
) error {
343345
// Perform in-memory segment merging only when the memory pressure is
344346
// below the configured threshold, else the persister performs the
345347
// direct persistence of segments.
@@ -365,7 +367,8 @@ var DefaultMinSegmentsForInMemoryMerge = 2
365367
// persistSnapshotMaybeMerge examines the snapshot and might merge and
366368
// persist the in-memory zap segments if there are enough of them
367369
func (s *Scorch) persistSnapshotMaybeMerge(snapshot *IndexSnapshot) (
368-
bool, error) {
370+
bool, error,
371+
) {
369372
// collect the in-memory zap segments (SegmentBase instances)
370373
var sbs []segment.Segment
371374
var sbsDrops []*roaring.Bitmap
@@ -468,7 +471,8 @@ func copyToDirectory(srcPath string, d index.Directory) (int64, error) {
468471
}
469472

470473
func persistToDirectory(seg segment.UnpersistedSegment, d index.Directory,
471-
path string) error {
474+
path string,
475+
) error {
472476
if d == nil {
473477
return seg.Persist(path)
474478
}
@@ -491,7 +495,8 @@ func persistToDirectory(seg segment.UnpersistedSegment, d index.Directory,
491495

492496
func prepareBoltSnapshot(snapshot *IndexSnapshot, tx *bolt.Tx, path string,
493497
segPlugin SegmentPlugin, d index.Directory) (
494-
[]string, map[uint64]string, error) {
498+
[]string, map[uint64]string, error,
499+
) {
495500
snapshotsBucket, err := tx.CreateBucketIfNotExists(boltSnapshotsBucket)
496501
if err != nil {
497502
return nil, nil, err
@@ -713,16 +718,18 @@ func zapFileName(epoch uint64) string {
713718

714719
// bolt snapshot code
715720

716-
var boltSnapshotsBucket = []byte{'s'}
717-
var boltPathKey = []byte{'p'}
718-
var boltDeletedKey = []byte{'d'}
719-
var boltInternalKey = []byte{'i'}
720-
var boltMetaDataKey = []byte{'m'}
721-
var boltMetaDataSegmentTypeKey = []byte("type")
722-
var boltMetaDataSegmentVersionKey = []byte("version")
723-
var boltMetaDataTimeStamp = []byte("timeStamp")
724-
var boltStatsKey = []byte("stats")
725-
var TotBytesWrittenKey = []byte("TotBytesWritten")
721+
var (
722+
boltSnapshotsBucket = []byte{'s'}
723+
boltPathKey = []byte{'p'}
724+
boltDeletedKey = []byte{'d'}
725+
boltInternalKey = []byte{'i'}
726+
boltMetaDataKey = []byte{'m'}
727+
boltMetaDataSegmentTypeKey = []byte("type")
728+
boltMetaDataSegmentVersionKey = []byte("version")
729+
boltMetaDataTimeStamp = []byte("timeStamp")
730+
boltStatsKey = []byte("stats")
731+
TotBytesWrittenKey = []byte("TotBytesWritten")
732+
)
726733

727734
func (s *Scorch) loadFromBolt() error {
728735
return s.rootBolt.View(func(tx *bolt.Tx) error {
@@ -800,7 +807,6 @@ func (s *Scorch) LoadSnapshot(epoch uint64) (rv *IndexSnapshot, err error) {
800807
}
801808

802809
func (s *Scorch) loadSnapshot(snapshot *bolt.Bucket) (*IndexSnapshot, error) {
803-
804810
rv := &IndexSnapshot{
805811
parent: s,
806812
internal: make(map[string][]byte),
@@ -947,7 +953,8 @@ var RollbackSamplingInterval = 0 * time.Minute
947953
var RollbackRetentionFactor = float64(0.5)
948954

949955
func getTimeSeriesSnapshots(maxDataPoints int, interval time.Duration,
950-
snapshots []*snapshotMetaData) (int, map[uint64]time.Time) {
956+
snapshots []*snapshotMetaData,
957+
) (int, map[uint64]time.Time) {
951958
if interval == 0 {
952959
return len(snapshots), map[uint64]time.Time{}
953960
}
@@ -994,8 +1001,8 @@ func getTimeSeriesSnapshots(maxDataPoints int, interval time.Duration,
9941001
// by a time duration of RollbackSamplingInterval.
9951002
func getProtectedSnapshots(rollbackSamplingInterval time.Duration,
9961003
numSnapshotsToKeep int,
997-
persistedSnapshots []*snapshotMetaData) map[uint64]time.Time {
998-
1004+
persistedSnapshots []*snapshotMetaData,
1005+
) map[uint64]time.Time {
9991006
lastPoint, protectedEpochs := getTimeSeriesSnapshots(numSnapshotsToKeep,
10001007
rollbackSamplingInterval, persistedSnapshots)
10011008
if len(protectedEpochs) < numSnapshotsToKeep {
@@ -1162,7 +1169,8 @@ func (s *Scorch) removeOldZapFiles() error {
11621169
// Hence we try to retain atleast retentionFactor portion worth of old snapshots
11631170
// in such a scenario using the following function
11641171
func getBoundaryCheckPoint(retentionFactor float64,
1165-
checkPoints []*snapshotMetaData, timeStamp time.Time) time.Time {
1172+
checkPoints []*snapshotMetaData, timeStamp time.Time,
1173+
) time.Time {
11661174
if checkPoints != nil {
11671175
boundary := checkPoints[int(math.Floor(float64(len(checkPoints))*
11681176
retentionFactor))]

index/scorch/reader_test.go

Lines changed: 24 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
package scorch
1616

1717
import (
18+
"context"
1819
"encoding/binary"
1920
"reflect"
2021
"testing"
@@ -82,7 +83,7 @@ func TestIndexReader(t *testing.T) {
8283
}()
8384

8485
// first look for a term that doesn't exist
85-
reader, err := indexReader.TermFieldReader(nil, []byte("nope"), "name", true, true, true)
86+
reader, err := indexReader.TermFieldReader(context.TODO(), []byte("nope"), "name", true, true, true)
8687
if err != nil {
8788
t.Errorf("Error accessing term field reader: %v", err)
8889
}
@@ -95,7 +96,7 @@ func TestIndexReader(t *testing.T) {
9596
t.Fatal(err)
9697
}
9798

98-
reader, err = indexReader.TermFieldReader(nil, []byte("test"), "name", true, true, true)
99+
reader, err = indexReader.TermFieldReader(context.TODO(), []byte("test"), "name", true, true, true)
99100
if err != nil {
100101
t.Errorf("Error accessing term field reader: %v", err)
101102
}
@@ -145,7 +146,7 @@ func TestIndexReader(t *testing.T) {
145146
},
146147
},
147148
}
148-
tfr, err := indexReader.TermFieldReader(nil, []byte("rice"), "desc", true, true, true)
149+
tfr, err := indexReader.TermFieldReader(context.TODO(), []byte("rice"), "desc", true, true, true)
149150
if err != nil {
150151
t.Errorf("unexpected error: %v", err)
151152
}
@@ -163,7 +164,7 @@ func TestIndexReader(t *testing.T) {
163164
}
164165

165166
// now test usage of advance
166-
reader, err = indexReader.TermFieldReader(nil, []byte("test"), "name", true, true, true)
167+
reader, err = indexReader.TermFieldReader(context.TODO(), []byte("test"), "name", true, true, true)
167168
if err != nil {
168169
t.Errorf("Error accessing term field reader: %v", err)
169170
}
@@ -194,7 +195,7 @@ func TestIndexReader(t *testing.T) {
194195
}
195196

196197
// now test creating a reader for a field that doesn't exist
197-
reader, err = indexReader.TermFieldReader(nil, []byte("water"), "doesnotexist", true, true, true)
198+
reader, err = indexReader.TermFieldReader(context.TODO(), []byte("water"), "doesnotexist", true, true, true)
198199
if err != nil {
199200
t.Errorf("Error accessing term field reader: %v", err)
200201
}
@@ -216,7 +217,6 @@ func TestIndexReader(t *testing.T) {
216217
if match != nil {
217218
t.Errorf("expected nil, got %v", match)
218219
}
219-
220220
}
221221

222222
func TestIndexDocIdReader(t *testing.T) {
@@ -290,10 +290,17 @@ func TestIndexDocIdReader(t *testing.T) {
290290
}()
291291

292292
id, err := reader.Next()
293+
if err != nil {
294+
t.Fatal(err)
295+
}
296+
293297
count := uint64(0)
294298
for id != nil {
295299
count++
296300
id, err = reader.Next()
301+
if err != nil {
302+
t.Fatal(err)
303+
}
297304
}
298305
if count != expectedCount {
299306
t.Errorf("expected %d, got %d", expectedCount, count)
@@ -418,6 +425,10 @@ func TestIndexDocIdOnlyReader(t *testing.T) {
418425
}()
419426

420427
id, err := reader.Next()
428+
if err != nil {
429+
t.Fatal(err)
430+
}
431+
421432
count := uint64(0)
422433
for id != nil {
423434
count++
@@ -478,10 +489,17 @@ func TestIndexDocIdOnlyReader(t *testing.T) {
478489
}()
479490

480491
id, err = reader3.Next()
492+
if err != nil {
493+
t.Fatal(err)
494+
}
495+
481496
count = uint64(0)
482497
for id != nil {
483498
count++
484499
id, err = reader3.Next()
500+
if err != nil {
501+
t.Fatal(err)
502+
}
485503
}
486504
if count != 1 {
487505
t.Errorf("expected 1, got %d", count)
@@ -568,7 +586,6 @@ func TestIndexDocIdOnlyReader(t *testing.T) {
568586
// if !id.Equals(index.IndexInternalID("9")) {
569587
// t.Errorf("expected to find id '9', got '%s'", id)
570588
// }
571-
572589
}
573590

574591
func TestSegmentIndexAndLocalDocNumFromGlobal(t *testing.T) {

index/scorch/scorch.go

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,8 @@ type internalStats struct {
110110

111111
func NewScorch(storeName string,
112112
config map[string]interface{},
113-
analysisQueue *index.AnalysisQueue) (index.Index, error) {
113+
analysisQueue *index.AnalysisQueue,
114+
) (index.Index, error) {
114115
rv := &Scorch{
115116
version: Version,
116117
config: config,
@@ -137,7 +138,9 @@ func NewScorch(storeName string,
137138

138139
typ, ok := config["spatialPlugin"].(string)
139140
if ok {
140-
rv.loadSpatialAnalyzerPlugin(typ)
141+
if err := rv.loadSpatialAnalyzerPlugin(typ); err != nil {
142+
return nil, err
143+
}
141144
}
142145

143146
rv.root = &IndexSnapshot{parent: rv, refs: 1, creator: "NewScorch"}
@@ -230,7 +233,7 @@ func (s *Scorch) openBolt() error {
230233
s.unsafeBatch = true
231234
}
232235

233-
var rootBoltOpt = *bolt.DefaultOptions
236+
rootBoltOpt := *bolt.DefaultOptions
234237
if s.readOnly {
235238
rootBoltOpt.ReadOnly = true
236239
rootBoltOpt.OpenFile = func(path string, flag int, mode os.FileMode) (*os.File, error) {
@@ -244,7 +247,7 @@ func (s *Scorch) openBolt() error {
244247
}
245248
} else {
246249
if s.path != "" {
247-
err := os.MkdirAll(s.path, 0700)
250+
err := os.MkdirAll(s.path, 0o700)
248251
if err != nil {
249252
return err
250253
}
@@ -263,7 +266,7 @@ func (s *Scorch) openBolt() error {
263266
rootBoltPath := s.path + string(os.PathSeparator) + "root.bolt"
264267
var err error
265268
if s.path != "" {
266-
s.rootBolt, err = bolt.Open(rootBoltPath, 0600, &rootBoltOpt)
269+
s.rootBolt, err = bolt.Open(rootBoltPath, 0o600, &rootBoltOpt)
267270
if err != nil {
268271
return err
269272
}
@@ -325,7 +328,9 @@ func (s *Scorch) openBolt() error {
325328

326329
typ, ok := s.config["spatialPlugin"].(string)
327330
if ok {
328-
s.loadSpatialAnalyzerPlugin(typ)
331+
if err := s.loadSpatialAnalyzerPlugin(typ); err != nil {
332+
return err
333+
}
329334
}
330335

331336
return nil
@@ -481,8 +486,8 @@ func (s *Scorch) Batch(batch *index.Batch) (err error) {
481486
}
482487

483488
func (s *Scorch) prepareSegment(newSegment segment.Segment, ids []string,
484-
internalOps map[string][]byte, persistedCallback index.BatchCallback, stats *fieldStats) error {
485-
489+
internalOps map[string][]byte, persistedCallback index.BatchCallback, stats *fieldStats,
490+
) error {
486491
// new introduction
487492
introduction := &segmentIntroduction{
488493
id: atomic.AddUint64(&s.nextSegmentID, 1),
@@ -576,7 +581,8 @@ func (s *Scorch) BytesReadQueryTime() uint64 {
576581
}
577582

578583
func (s *Scorch) diskFileStats(rootSegmentPaths map[string]struct{}) (uint64,
579-
uint64, uint64) {
584+
uint64, uint64,
585+
) {
580586
var numFilesOnDisk, numBytesUsedDisk, numBytesOnDiskByRoot uint64
581587
if s.path != "" {
582588
files, err := os.ReadDir(s.path)
@@ -832,7 +838,6 @@ func (fs *fieldStats) Store(statName, fieldName string, value uint64) {
832838

833839
// Combine the given stats map with the existing map
834840
func (fs *fieldStats) Aggregate(stats segment.FieldStats) {
835-
836841
statMap := stats.Fetch()
837842
if statMap == nil {
838843
return

0 commit comments

Comments
 (0)