diff --git a/cmd/bee/cmd/db_test.go b/cmd/bee/cmd/db_test.go index f52da4bd0d2..7d32b5c3080 100644 --- a/cmd/bee/cmd/db_test.go +++ b/cmd/bee/cmd/db_test.go @@ -39,7 +39,7 @@ func TestDBExportImport(t *testing.T) { chunks := make(map[string]int) nChunks := 10 - for i := 0; i < nChunks; i++ { + for range nChunks { ch := storagetest.GenerateTestRandomChunk() err := db1.ReservePutter().Put(ctx, ch) if err != nil { @@ -101,13 +101,13 @@ func TestDBExportImportPinning(t *testing.T) { pins := make(map[string]any) nChunks := 10 - for i := 0; i < 2; i++ { + for range 2 { rootAddr := swarm.RandAddress(t) collection, err := db1.NewCollection(ctx) if err != nil { t.Fatal(err) } - for j := 0; j < nChunks; j++ { + for range nChunks { ch := storagetest.GenerateTestRandomChunk() err = collection.Put(ctx, ch) if err != nil { @@ -186,7 +186,7 @@ func TestDBNuke_FLAKY(t *testing.T) { }, dataDir) nChunks := 10 - for i := 0; i < nChunks; i++ { + for range nChunks { ch := storagetest.GenerateTestRandomChunk() err := db.ReservePutter().Put(ctx, ch) if err != nil { @@ -241,7 +241,7 @@ func TestDBInfo(t *testing.T) { }, dir1) nChunks := 10 - for i := 0; i < nChunks; i++ { + for range nChunks { ch := storagetest.GenerateTestRandomChunk() err := db1.ReservePutter().Put(ctx, ch) if err != nil { diff --git a/pkg/accounting/accounting.go b/pkg/accounting/accounting.go index 45e4ab14f79..a877a70d0ec 100644 --- a/pkg/accounting/accounting.go +++ b/pkg/accounting/accounting.go @@ -312,10 +312,7 @@ func (a *Accounting) PrepareCredit(ctx context.Context, peer swarm.Address, pric } } - timeElapsedInSeconds := (a.timeNow().UnixMilli() - accountingPeer.refreshTimestampMilliseconds) / 1000 - if timeElapsedInSeconds > 1 { - timeElapsedInSeconds = 1 - } + timeElapsedInSeconds := min((a.timeNow().UnixMilli()-accountingPeer.refreshTimestampMilliseconds)/1000, 1) refreshDue := new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), a.refreshRate) overdraftLimit := 
new(big.Int).Add(accountingPeer.paymentThreshold, refreshDue) @@ -745,10 +742,7 @@ func (a *Accounting) PeerAccounting() (map[string]PeerInfo, error) { t := a.timeNow() - timeElapsedInSeconds := t.Unix() - accountingPeer.refreshReceivedTimestamp - if timeElapsedInSeconds > 1 { - timeElapsedInSeconds = 1 - } + timeElapsedInSeconds := min(t.Unix()-accountingPeer.refreshReceivedTimestamp, 1) // get appropriate refresh rate refreshRate := new(big.Int).Set(a.refreshRate) @@ -759,10 +753,7 @@ func (a *Accounting) PeerAccounting() (map[string]PeerInfo, error) { refreshDue := new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), refreshRate) currentThresholdGiven := new(big.Int).Add(accountingPeer.disconnectLimit, refreshDue) - timeElapsedInSeconds = (t.UnixMilli() - accountingPeer.refreshTimestampMilliseconds) / 1000 - if timeElapsedInSeconds > 1 { - timeElapsedInSeconds = 1 - } + timeElapsedInSeconds = min((t.UnixMilli()-accountingPeer.refreshTimestampMilliseconds)/1000, 1) // get appropriate refresh rate refreshDue = new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), a.refreshRate) @@ -1352,10 +1343,7 @@ func (d *debitAction) Apply() error { a.metrics.TotalDebitedAmount.Add(tot) a.metrics.DebitEventsCount.Inc() - timeElapsedInSeconds := a.timeNow().Unix() - d.accountingPeer.refreshReceivedTimestamp - if timeElapsedInSeconds > 1 { - timeElapsedInSeconds = 1 - } + timeElapsedInSeconds := min(a.timeNow().Unix()-d.accountingPeer.refreshReceivedTimestamp, 1) // get appropriate refresh rate refreshRate := new(big.Int).Set(a.refreshRate) diff --git a/pkg/accounting/accounting_test.go b/pkg/accounting/accounting_test.go index bfcd0fcda05..ec72235534e 100644 --- a/pkg/accounting/accounting_test.go +++ b/pkg/accounting/accounting_test.go @@ -1503,7 +1503,7 @@ func TestAccountingCallPaymentErrorRetries(t *testing.T) { acc.NotifyPaymentSent(peer1Addr, sentAmount, errors.New("error")) // try another n requests 1 per second - for i := 0; i < 10; i++ { + for range 10 { ts++ 
acc.SetTime(ts) @@ -1857,8 +1857,8 @@ func testAccountingSettlementGrowingThresholds(t *testing.T, settleFunc func(t * checkPaymentThreshold := new(big.Int).Set(testPayThreshold) // Simulate first 18 threshold upgrades - for j := 0; j < 18; j++ { - for i := 0; i < 100; i++ { + for range 18 { + for range 100 { // expect no change in threshold while less than 100 seconds worth of refreshment rate was settled settleFunc(t, acc, peer1Addr, testGrowth-1) @@ -1891,7 +1891,7 @@ func testAccountingSettlementGrowingThresholds(t *testing.T, settleFunc func(t * // Expect no increase for the next 179 seconds of refreshment - for k := 0; k < 1799; k++ { + for range 1799 { settleFunc(t, acc, peer1Addr, testGrowth) @@ -1917,7 +1917,7 @@ func testAccountingSettlementGrowingThresholds(t *testing.T, settleFunc func(t * // Expect no increase for another 3599 seconds of refreshments - for k := 0; k < 3599; k++ { + for range 3599 { settleFunc(t, acc, peer1Addr, testGrowth) diff --git a/pkg/api/api.go b/pkg/api/api.go index 418744ad621..5f747237bc4 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -624,7 +624,7 @@ func (s *Service) checkOrigin(r *http.Request) bool { // validationError is a custom error type for validation errors. type validationError struct { Entry string - Value interface{} + Value any Cause error } @@ -636,7 +636,7 @@ func (e *validationError) Error() string { // mapStructure maps the input into output struct and validates the output. // It's a helper method for the handlers, which reduces the chattiness // of the code. -func (s *Service) mapStructure(input, output interface{}) func(string, log.Logger, http.ResponseWriter) { +func (s *Service) mapStructure(input, output any) func(string, log.Logger, http.ResponseWriter) { // response unifies the response format for parsing and validation errors. 
response := func(err error) func(string, log.Logger, http.ResponseWriter) { return func(msg string, logger log.Logger, w http.ResponseWriter) { diff --git a/pkg/api/bzz_test.go b/pkg/api/bzz_test.go index 9738cb50f17..7f636476d1f 100644 --- a/pkg/api/bzz_test.go +++ b/pkg/api/bzz_test.go @@ -104,10 +104,7 @@ func TestBzzUploadDownloadWithRedundancy_FLAKY(t *testing.T) { store.Record() defer store.Unrecord() // we intend to forget as many chunks as possible for the given redundancy level - forget := parityCnt - if parityCnt > shardCnt { - forget = shardCnt - } + forget := min(parityCnt, shardCnt) if levels == 1 { forget = 2 } @@ -141,7 +138,7 @@ func TestBzzUploadDownloadWithRedundancy_FLAKY(t *testing.T) { if len(got) != len(want) { t.Fatalf("got %v parts, want %v parts", len(got), len(want)) } - for i := 0; i < len(want); i++ { + for i := range want { if !bytes.Equal(got[i], want[i]) { t.Errorf("part %v: got %q, want %q", i, string(got[i]), string(want[i])) } @@ -670,7 +667,7 @@ func TestBzzFilesRangeRequests(t *testing.T) { if len(got) != len(want) { t.Fatalf("got %v parts, want %v parts", len(got), len(want)) } - for i := 0; i < len(want); i++ { + for i := range want { if !bytes.Equal(got[i], want[i]) { t.Errorf("part %v: got %q, want %q", i, string(got[i]), string(want[i])) } @@ -681,7 +678,7 @@ func TestBzzFilesRangeRequests(t *testing.T) { } } -func createRangeHeader(data interface{}, ranges [][2]int) (header string, parts [][]byte) { +func createRangeHeader(data any, ranges [][2]int) (header string, parts [][]byte) { getLen := func() int { switch data := data.(type) { case []byte: diff --git a/pkg/api/chunk_stream_test.go b/pkg/api/chunk_stream_test.go index 47c8e860b51..5ba62cdd76c 100644 --- a/pkg/api/chunk_stream_test.go +++ b/pkg/api/chunk_stream_test.go @@ -38,7 +38,7 @@ func TestChunkUploadStream(t *testing.T) { t.Run("upload and verify", func(t *testing.T) { chsToGet := []swarm.Chunk{} - for i := 0; i < 5; i++ { + for range 5 { ch := 
testingc.GenerateTestRandomChunk() err := wsConn.SetWriteDeadline(time.Now().Add(time.Second)) diff --git a/pkg/api/export_test.go b/pkg/api/export_test.go index 1e5e8f1554f..00494f928da 100644 --- a/pkg/api/export_test.go +++ b/pkg/api/export_test.go @@ -133,7 +133,7 @@ var ErrHexLength = errHexLength type HexInvalidByteError = hexInvalidByteError -func MapStructure(input, output interface{}, hooks map[string]func(v string) (string, error)) error { +func MapStructure(input, output any, hooks map[string]func(v string) (string, error)) error { return mapStructure(input, output, hooks) } diff --git a/pkg/api/logger_test.go b/pkg/api/logger_test.go index 1e13917535f..78caf7078f3 100644 --- a/pkg/api/logger_test.go +++ b/pkg/api/logger_test.go @@ -65,8 +65,8 @@ func TestGetLoggers(t *testing.T) { } api.ReplaceLogRegistryIterateFn(fn) - have := make(map[string]interface{}) - want := make(map[string]interface{}) + have := make(map[string]any) + want := make(map[string]any) data := `{"loggers":[{"id":"b25lWzBdW10-PjgyNDYzNDg2MDM2MA==","logger":"one","subsystem":"one[0][]\u003e\u003e824634860360","verbosity":"all"},{"id":"b25lL25hbWVbMF1bXT4-ODI0NjM0ODYwMzYw","logger":"one/name","subsystem":"one/name[0][]\u003e\u003e824634860360","verbosity":"warning"},{"id":"b25lL25hbWVbMF1bXCJ2YWxcIj0xXT4-ODI0NjM0ODYwMzYw","logger":"one/name","subsystem":"one/name[0][\\\"val\\\"=1]\u003e\u003e824634860360","verbosity":"warning"},{"id":"b25lL25hbWVbMV1bXT4-ODI0NjM0ODYwMzYw","logger":"one/name","subsystem":"one/name[1][]\u003e\u003e824634860360","verbosity":"info"},{"id":"b25lL25hbWVbMl1bXT4-ODI0NjM0ODYwMzYw","logger":"one/name","subsystem":"one/name[2][]\u003e\u003e824634860360","verbosity":"info"}],"tree":{"one":{"+":["all|one[0][]\u003e\u003e824634860360"],"/":{"name":{"+":["warning|one/name[0][]\u003e\u003e824634860360","warning|one/name[0][\\\"val\\\"=1]\u003e\u003e824634860360","info|one/name[1][]\u003e\u003e824634860360","info|one/name[2][]\u003e\u003e824634860360"]}}}}}` if err := 
json.Unmarshal([]byte(data), &want); err != nil { t.Fatalf("unexpected error: %v", err) diff --git a/pkg/api/postage_test.go b/pkg/api/postage_test.go index bc476be99da..53ac6ccb8ae 100644 --- a/pkg/api/postage_test.go +++ b/pkg/api/postage_test.go @@ -702,7 +702,7 @@ func TestPostageAccessHandler(t *testing.T) { method string url string respCode int - resp interface{} + resp any } success := []operation{ diff --git a/pkg/api/tag.go b/pkg/api/tag.go index 6a38fd7a65b..67ebf7b7acf 100644 --- a/pkg/api/tag.go +++ b/pkg/api/tag.go @@ -19,7 +19,7 @@ import ( ) type tagRequest struct { - Address swarm.Address `json:"address,omitempty"` + Address swarm.Address `json:"address"` } type tagResponse struct { diff --git a/pkg/api/util.go b/pkg/api/util.go index 340a6270b5f..60780c5acb4 100644 --- a/pkg/api/util.go +++ b/pkg/api/util.go @@ -132,7 +132,7 @@ var flattenErrorsFormat = func(es []error) string { // // In case of parsing error, a new parseError is returned to the caller. // The caller can use the Unwrap method to get the original error. 
-func mapStructure(input, output interface{}, hooks map[string]func(v string) (string, error)) (err error) { +func mapStructure(input, output any, hooks map[string]func(v string) (string, error)) (err error) { if input == nil || output == nil { return nil } diff --git a/pkg/api/util_test.go b/pkg/api/util_test.go index 4628ce31270..d9ae6732810 100644 --- a/pkg/api/util_test.go +++ b/pkg/api/util_test.go @@ -102,8 +102,8 @@ func TestMapStructure(t *testing.T) { tests := []struct { name string - src interface{} - want interface{} + src any + want any wantErr error }{{ name: "bool zero value", @@ -520,7 +520,7 @@ func TestMapStructure_InputOutputSanityCheck(t *testing.T) { t.Run("input is nil", func(t *testing.T) { t.Parallel() - var input interface{} + var input any err := api.MapStructure(input, struct{}{}, nil) if err != nil { t.Fatalf("unexpected error: %v", err) @@ -541,7 +541,7 @@ func TestMapStructure_InputOutputSanityCheck(t *testing.T) { t.Parallel() var ( - input = map[string]interface{}{"someVal": "123"} + input = map[string]any{"someVal": "123"} output struct { SomeVal string `map:"someVal"` } @@ -556,8 +556,8 @@ func TestMapStructure_InputOutputSanityCheck(t *testing.T) { t.Parallel() var ( - input = map[string]interface{}{"someVal": "123"} - output interface{} + input = map[string]any{"someVal": "123"} + output any ) err := api.MapStructure(&input, output, nil) if err != nil { @@ -569,7 +569,7 @@ func TestMapStructure_InputOutputSanityCheck(t *testing.T) { t.Parallel() var ( - input = map[string]interface{}{"someVal": "123"} + input = map[string]any{"someVal": "123"} output = struct { SomeVal string `map:"someVal"` }{} @@ -584,7 +584,7 @@ func TestMapStructure_InputOutputSanityCheck(t *testing.T) { t.Parallel() var ( - input = map[string]interface{}{"someVal": "123"} + input = map[string]any{"someVal": "123"} output = "foo" ) err := api.MapStructure(&input, &output, nil) diff --git a/pkg/bitvector/bitvector_test.go b/pkg/bitvector/bitvector_test.go index 
d986c5624d8..c2a74e4c67e 100644 --- a/pkg/bitvector/bitvector_test.go +++ b/pkg/bitvector/bitvector_test.go @@ -64,7 +64,7 @@ func TestBitvectorGetSet(t *testing.T) { t.Errorf("error for length %v: %v", length, err) } - for i := 0; i < length; i++ { + for i := range length { if bv.Get(i) { t.Errorf("expected false for element on index %v", i) } @@ -79,9 +79,9 @@ func TestBitvectorGetSet(t *testing.T) { bv.Get(length + 8) }() - for i := 0; i < length; i++ { + for i := range length { bv.Set(i) - for j := 0; j < length; j++ { + for j := range length { if j == i { if !bv.Get(j) { t.Errorf("element on index %v is not set to true", i) diff --git a/pkg/blocker/blocker.go b/pkg/blocker/blocker.go index 5b916c81732..2d3deb10bf1 100644 --- a/pkg/blocker/blocker.go +++ b/pkg/blocker/blocker.go @@ -61,9 +61,7 @@ func New(blocklister p2p.Blocklister, flagTimeout, blockDuration, wakeUpTime tim blocklistCallback: callback, } - b.closeWg.Add(1) - go func() { - defer b.closeWg.Done() + b.closeWg.Go(func() { for { select { case <-b.quit: @@ -74,11 +72,9 @@ func New(blocklister p2p.Blocklister, flagTimeout, blockDuration, wakeUpTime tim } } } - }() + }) - b.closeWg.Add(1) - go func() { - defer b.closeWg.Done() + b.closeWg.Go(func() { for { select { case <-time.After(wakeUpTime): @@ -87,7 +83,7 @@ func New(blocklister p2p.Blocklister, flagTimeout, blockDuration, wakeUpTime tim return } } - }() + }) return b } diff --git a/pkg/bmt/benchmark_test.go b/pkg/bmt/benchmark_test.go index 7ab2339c02d..aa6cc1703dd 100644 --- a/pkg/bmt/benchmark_test.go +++ b/pkg/bmt/benchmark_test.go @@ -69,7 +69,7 @@ func benchmarkBMTBaseline(b *testing.B, _ int) { for b.Loop() { eg := new(errgroup.Group) - for j := 0; j < testSegmentCount; j++ { + for range testSegmentCount { eg.Go(func() error { _, err := bmt.Sha3hash(testData[:hashSize]) return err @@ -113,7 +113,7 @@ func benchmarkPool(b *testing.B, poolsize int) { for b.Loop() { eg := new(errgroup.Group) - for j := 0; j < cycles; j++ { + for range cycles 
{ eg.Go(func() error { h := pool.Get() defer pool.Put(h) diff --git a/pkg/bmt/bmt_test.go b/pkg/bmt/bmt_test.go index 0eefe9fdb5d..c82fb896a6f 100644 --- a/pkg/bmt/bmt_test.go +++ b/pkg/bmt/bmt_test.go @@ -129,7 +129,7 @@ func testHasherReuse(t *testing.T, poolsize int) { h := pool.Get() defer pool.Put(h) - for i := 0; i < 100; i++ { + for i := range 100 { seed := int64(i) testData := testutil.RandBytesWithSeed(t, 4096, seed) n := rand.Intn(h.Capacity()) @@ -151,7 +151,7 @@ func TestBMTConcurrentUse(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() eg, ectx := errgroup.WithContext(ctx) - for i := 0; i < cycles; i++ { + for range cycles { eg.Go(func() error { select { case <-ectx.Done(): @@ -204,7 +204,7 @@ func TestBMTWriterBuffers(t *testing.T) { reads := rand.Intn(count*2-1) + 1 offsets := make([]int, reads+1) - for i := 0; i < reads; i++ { + for i := range reads { offsets[i] = rand.Intn(size) + 1 } offsets[reads] = size @@ -235,7 +235,7 @@ func TestBMTWriterBuffers(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() eg, ectx := errgroup.WithContext(ctx) - for i := 0; i < attempts; i++ { + for range attempts { eg.Go(func() error { select { case <-ectx.Done(): diff --git a/pkg/bmt/proof_test.go b/pkg/bmt/proof_test.go index ba1c3e7220c..5b3625acefb 100644 --- a/pkg/bmt/proof_test.go +++ b/pkg/bmt/proof_test.go @@ -209,7 +209,7 @@ func TestProof(t *testing.T) { t.Fatal(err) } - for i := 0; i < 128; i++ { + for i := range 128 { t.Run(fmt.Sprintf("segmentIndex %d", i), func(t *testing.T) { t.Parallel() diff --git a/pkg/bzz/utilities_test.go b/pkg/bzz/utilities_test.go index 7e4666b66ac..e92e7e11e13 100644 --- a/pkg/bzz/utilities_test.go +++ b/pkg/bzz/utilities_test.go @@ -45,7 +45,7 @@ func makeAddreses(t *testing.T, count int) []bzz.Address { t.Helper() result := make([]bzz.Address, count) - for i := 0; i < count; i++ { + for i := range count { result[i] = 
makeAddress(t) } return result diff --git a/pkg/encryption/encryption.go b/pkg/encryption/encryption.go index 97a582814c8..d46d567088d 100644 --- a/pkg/encryption/encryption.go +++ b/pkg/encryption/encryption.go @@ -158,7 +158,7 @@ func (e *Encryption) Transcrypt(i int, in, out []byte) error { // XOR bytes uptil length of in (out must be at least as long) inLength := len(in) - for j := 0; j < inLength; j++ { + for j := range inLength { out[j] = in[j] ^ segmentKey[j] } // insert padding if out is longer diff --git a/pkg/encryption/mock/mock.go b/pkg/encryption/mock/mock.go index 66420fb2f1b..b917b095be7 100644 --- a/pkg/encryption/mock/mock.go +++ b/pkg/encryption/mock/mock.go @@ -124,7 +124,7 @@ func xor(input, key []byte) ([]byte, error) { } inputLen := len(input) output := make([]byte, inputLen) - for i := 0; i < inputLen; i++ { + for i := range inputLen { output[i] = input[i] ^ key[i%keyLen] } return output, nil diff --git a/pkg/feeds/epochs/lookup_benchmark_test.go b/pkg/feeds/epochs/lookup_benchmark_test.go index 9d1005ab023..0d409bef686 100644 --- a/pkg/feeds/epochs/lookup_benchmark_test.go +++ b/pkg/feeds/epochs/lookup_benchmark_test.go @@ -38,7 +38,7 @@ func BenchmarkFinder(b *testing.B) { ctx := context.Background() - for at := int64(0); at < prefill; at++ { + for at := range prefill { err = updater.Update(ctx, at, payload) if err != nil { b.Fatal(err) diff --git a/pkg/feeds/sequence/lookup_benchmark_test.go b/pkg/feeds/sequence/lookup_benchmark_test.go index d10fe144aca..73e9255d497 100644 --- a/pkg/feeds/sequence/lookup_benchmark_test.go +++ b/pkg/feeds/sequence/lookup_benchmark_test.go @@ -36,7 +36,7 @@ func BenchmarkFinder(b *testing.B) { ctx := context.Background() - for at := int64(0); at < prefill; at++ { + for at := range prefill { err = updater.Update(ctx, at, payload) if err != nil { b.Fatal(err) diff --git a/pkg/feeds/testing/lookup.go b/pkg/feeds/testing/lookup.go index 21656be5b64..e56e3c477ba 100644 --- a/pkg/feeds/testing/lookup.go +++ 
b/pkg/feeds/testing/lookup.go @@ -186,7 +186,7 @@ func TestFinderIntervals(t *testing.T, nextf func() (bool, int64), finderf func( func TestFinderRandomIntervals(t *testing.T, finderf func(storage.Getter, *feeds.Feed) feeds.Lookup, updaterf func(putter storage.Putter, signer crypto.Signer, topic []byte) (feeds.Updater, error)) { t.Parallel() - for j := 0; j < 3; j++ { + for j := range 3 { t.Run(fmt.Sprintf("random intervals %d", j), func(t *testing.T) { t.Parallel() diff --git a/pkg/file/buffer_test.go b/pkg/file/buffer_test.go index 9fdf66266b6..71b374b7f4b 100644 --- a/pkg/file/buffer_test.go +++ b/pkg/file/buffer_test.go @@ -119,8 +119,8 @@ func TestCopyBuffer(t *testing.T) { dataSize int }{} - for i := 0; i < len(readBufferSizes); i++ { - for j := 0; j < len(dataSizes); j++ { + for i := range readBufferSizes { + for j := range dataSizes { testCases = append(testCases, struct { readBufferSize int dataSize int diff --git a/pkg/file/joiner/joiner.go b/pkg/file/joiner/joiner.go index 313ea21bd2c..cc89f921397 100644 --- a/pkg/file/joiner/joiner.go +++ b/pkg/file/joiner/joiner.go @@ -209,10 +209,7 @@ func (j *joiner) ReadAt(buffer []byte, off int64) (read int, err error) { return 0, io.EOF } - readLen := int64(cap(buffer)) - if readLen > j.span-off { - readLen = j.span - off - } + readLen := min(int64(cap(buffer)), j.span-off) var bytesRead int64 var eg errgroup.Group j.readAtOffset(buffer, j.rootData, 0, j.span, off, 0, readLen, &bytesRead, j.rootParity, &eg) @@ -277,14 +274,9 @@ func (j *joiner) readAtOffset( subtrieSpanLimit := sec currentReadSize := subtrieSpan - (off - cur) // the size of the subtrie, minus the offset from the start of the trie - // upper bound alignments - if currentReadSize > bytesToRead { - currentReadSize = bytesToRead - } - if currentReadSize > subtrieSpan { - currentReadSize = subtrieSpan - } + currentReadSize = min(currentReadSize, bytesToRead) + currentReadSize = min(currentReadSize, subtrieSpan) func(address swarm.Address, b []byte, 
cur, subTrieSize, off, bufferOffset, bytesToRead, subtrieSpanLimit int64) { eg.Go(func() error { diff --git a/pkg/file/joiner/joiner_test.go b/pkg/file/joiner/joiner_test.go index 27591252548..a8087a859db 100644 --- a/pkg/file/joiner/joiner_test.go +++ b/pkg/file/joiner/joiner_test.go @@ -790,7 +790,7 @@ func TestJoinerTwoLevelsAcrossChunk(t *testing.T) { // create 128+1 chunks for all references in the intermediate chunks cursor := 8 - for i := 0; i < swarm.Branches; i++ { + for range swarm.Branches { chunkAddressBytes := firstChunk.Data()[cursor : cursor+swarm.SectionSize] chunkAddress := swarm.NewAddress(chunkAddressBytes) ch := filetest.GenerateTestRandomFileChunk(chunkAddress, swarm.ChunkSize, swarm.ChunkSize) @@ -815,7 +815,7 @@ func TestJoinerTwoLevelsAcrossChunk(t *testing.T) { // read back all the chunks and verify b := make([]byte, swarm.ChunkSize) - for i := 0; i < swarm.Branches; i++ { + for i := range swarm.Branches { c, err := j.Read(b) if err != nil { t.Fatal(err) diff --git a/pkg/file/joiner/redecoder_test.go b/pkg/file/joiner/redecoder_test.go index 7df041de76d..0cf3b604ec2 100644 --- a/pkg/file/joiner/redecoder_test.go +++ b/pkg/file/joiner/redecoder_test.go @@ -33,7 +33,7 @@ func TestReDecoderFlow(t *testing.T) { // Create real data chunks with proper content dataShards := make([][]byte, dataShardCount) - for i := 0; i < dataShardCount; i++ { + for i := range dataShardCount { // Create chunks with simpler test data dataShards[i] = make([]byte, swarm.ChunkWithSpanSize) // Create a unique string for this shard @@ -44,7 +44,7 @@ func TestReDecoderFlow(t *testing.T) { // Create parity chunks using Reed-Solomon encoding parityShards := make([][]byte, parityShardCount) - for i := 0; i < parityShardCount; i++ { + for i := range parityShardCount { parityShards[i] = make([]byte, swarm.ChunkWithSpanSize) } @@ -68,7 +68,7 @@ func TestReDecoderFlow(t *testing.T) { addresses := make([]swarm.Address, totalShardCount) chunks := make([]swarm.Chunk, 
totalShardCount) - for i := 0; i < totalShardCount; i++ { + for i := range totalShardCount { // Create proper content-addressed chunks chunk, err := cac.NewWithDataSpan(allShards[i]) if err != nil { @@ -125,7 +125,7 @@ func TestReDecoderFlow(t *testing.T) { // we can still access the chunks // Sanity check - verify we can still fetch chunks through the cache - for i := 0; i < dataShardCount; i++ { + for i := range dataShardCount { _, err := decoder.Get(ctx, addresses[i]) if err != nil { t.Fatalf("Failed to get chunk %d after recovery: %v", i, err) @@ -139,7 +139,7 @@ func TestReDecoderFlow(t *testing.T) { } // Verify all chunks can be fetched through the ReDecoder - for i := 0; i < dataShardCount; i++ { + for i := range dataShardCount { _, err := newDecoder.Get(ctx, addresses[i]) if err != nil { t.Fatalf("Failed to get chunk %d through ReDecoder: %v", i, err) diff --git a/pkg/file/pipeline/hashtrie/hashtrie_test.go b/pkg/file/pipeline/hashtrie/hashtrie_test.go index edbe3c9e2e5..9f502f3fd5d 100644 --- a/pkg/file/pipeline/hashtrie/hashtrie_test.go +++ b/pkg/file/pipeline/hashtrie/hashtrie_test.go @@ -198,7 +198,7 @@ func TestLevels_TrieFull(t *testing.T) { ) // to create a level wrap we need to do branching^(level-1) writes - for i := 0; i < writes; i++ { + for range writes { a := &pipeline.PipeWriteArgs{Ref: addr.Bytes(), Span: span} err := ht.ChainWrite(a) if err != nil { @@ -239,7 +239,7 @@ func TestRegression(t *testing.T) { ) binary.LittleEndian.PutUint64(span, 4096) - for i := 0; i < writes; i++ { + for range writes { a := &pipeline.PipeWriteArgs{Ref: addr.Bytes(), Span: span} err := ht.ChainWrite(a) if err != nil { diff --git a/pkg/file/redundancy/getter/getter.go b/pkg/file/redundancy/getter/getter.go index 0953c501dc5..30ec23c00b3 100644 --- a/pkg/file/redundancy/getter/getter.go +++ b/pkg/file/redundancy/getter/getter.go @@ -69,12 +69,12 @@ func New(addrs []swarm.Address, shardCnt int, g storage.Getter, p storage.Putter } // after init, cache and wait 
channels are immutable, need no locking - for i := 0; i < shardCnt; i++ { + for i := range shardCnt { d.cache[addrs[i].ByteString()] = i } // after init, cache and wait channels are immutable, need no locking - for i := 0; i < size; i++ { + for i := range size { d.waits[i] = make(chan error) } diff --git a/pkg/file/redundancy/getter/getter_test.go b/pkg/file/redundancy/getter/getter_test.go index 42566866a3e..f3c9a27c3ff 100644 --- a/pkg/file/redundancy/getter/getter_test.go +++ b/pkg/file/redundancy/getter/getter_test.go @@ -266,7 +266,7 @@ func initData(t *testing.T, buf [][]byte, shardCnt int, s storage.ChunkStore) [] spanBytes := make([]byte, 8) binary.LittleEndian.PutUint64(spanBytes, swarm.ChunkSize) - for i := 0; i < len(buf); i++ { + for i := range buf { buf[i] = make([]byte, swarm.ChunkWithSpanSize) if i >= shardCnt { continue @@ -291,7 +291,7 @@ func initData(t *testing.T, buf [][]byte, shardCnt int, s storage.ChunkStore) [] // calculate chunk addresses and upload to the store addrs := make([]swarm.Address, len(buf)) ctx := context.TODO() - for i := 0; i < len(buf); i++ { + for i := range buf { chunk, err := cac.NewWithDataSpan(buf[i]) if err != nil { t.Fatal(err) @@ -313,7 +313,7 @@ func checkShardsAvailable(t *testing.T, s storage.ChunkStore, addrs []swarm.Addr eg.Go(func() (err error) { var delay time.Duration var ch swarm.Chunk - for i := 0; i < 30; i++ { + for i := range 30 { select { case <-ctx.Done(): return ctx.Err() diff --git a/pkg/file/span.go b/pkg/file/span.go index b8366233bd2..681cafb4acb 100644 --- a/pkg/file/span.go +++ b/pkg/file/span.go @@ -22,7 +22,7 @@ func GenerateSpanSizes(levels, branches int) []int64 { spans := make([]int64, levels) branchesSixtyfour := int64(branches) var span int64 = 1 - for i := 0; i < 9; i++ { + for i := range 9 { spans[i] = span span *= branchesSixtyfour } diff --git a/pkg/file/splitter/internal/job_test.go b/pkg/file/splitter/internal/job_test.go index 7e9824d4748..98a44ce3f51 100644 --- 
a/pkg/file/splitter/internal/job_test.go +++ b/pkg/file/splitter/internal/job_test.go @@ -82,10 +82,7 @@ func testSplitterJobVector(t *testing.T) { j := internal.NewSimpleSplitterJob(ctx, store, int64(len(data)), false) for i := 0; i < len(data); i += swarm.ChunkSize { - l := swarm.ChunkSize - if len(data)-i < swarm.ChunkSize { - l = len(data) - i - } + l := min(len(data)-i, swarm.ChunkSize) c, err := j.Write(data[i : i+l]) if err != nil { t.Fatal(err) diff --git a/pkg/gsoc/gsoc.go b/pkg/gsoc/gsoc.go index 6497c9b0c63..464681258e2 100644 --- a/pkg/gsoc/gsoc.go +++ b/pkg/gsoc/gsoc.go @@ -50,7 +50,7 @@ func (l *listener) Subscribe(address swarm.Address, handler Handler) (cleanup fu defer l.handlersMu.Unlock() h := l.handlers[address.ByteString()] - for i := 0; i < len(h); i++ { + for i := range h { if h[i] == &handler { l.handlers[address.ByteString()] = append(h[:i], h[i+1:]...) return diff --git a/pkg/gsoc/gsoc_test.go b/pkg/gsoc/gsoc_test.go index dfbe8e03a5c..dc49b0809a8 100644 --- a/pkg/gsoc/gsoc_test.go +++ b/pkg/gsoc/gsoc_test.go @@ -114,7 +114,7 @@ func ensureCalls(t *testing.T, calls *int, exp int) { func waitHandlerCallback(t *testing.T, msgChan *chan struct{}, count int) { t.Helper() - for received := 0; received < count; received++ { + for range count { select { case <-*msgChan: case <-time.After(1 * time.Second): diff --git a/pkg/hive/hive.go b/pkg/hive/hive.go index 27858cdcf87..7932fffc401 100644 --- a/pkg/hive/hive.go +++ b/pkg/hive/hive.go @@ -249,31 +249,25 @@ func (s *Service) disconnect(peer p2p.Peer) error { func (s *Service) startCheckPeersHandler() { ctx, cancel := context.WithCancel(context.Background()) - s.wg.Add(1) - go func() { - defer s.wg.Done() + s.wg.Go(func() { <-s.quit cancel() - }() + }) - s.wg.Add(1) - go func() { - defer s.wg.Done() + s.wg.Go(func() { for { select { case <-ctx.Done(): return case newPeers := <-s.peersChan: - s.wg.Add(1) - go func() { - defer s.wg.Done() + s.wg.Go(func() { cctx, cancel := context.WithTimeout(ctx, 
batchValidationTimeout) defer cancel() s.checkAndAddPeers(cctx, newPeers) - }() + }) } } - }() + }) } func (s *Service) checkAndAddPeers(ctx context.Context, peers pb.Peers) { diff --git a/pkg/hive/hive_test.go b/pkg/hive/hive_test.go index fdb903cf7e5..df06c2674ed 100644 --- a/pkg/hive/hive_test.go +++ b/pkg/hive/hive_test.go @@ -119,11 +119,11 @@ func TestBroadcastPeers(t *testing.T) { // populate all expected and needed random resources for 2 full batches // tests cases that uses fewer resources can use sub-slices of this data - var bzzAddresses []bzz.Address - var overlays []swarm.Address - var wantMsgs []pb.Peers + bzzAddresses := make([]bzz.Address, 0, 2*hive.MaxBatchSize) + overlays := make([]swarm.Address, 0, 2*hive.MaxBatchSize) + wantMsgs := make([]pb.Peers, 0, 2*hive.MaxBatchSize) - for i := 0; i < 2; i++ { + for range 2 { wantMsgs = append(wantMsgs, pb.Peers{Peers: []*pb.BzzAddress{}}) } diff --git a/pkg/jsonhttp/jsonhttp.go b/pkg/jsonhttp/jsonhttp.go index d9806dca523..e555223e110 100644 --- a/pkg/jsonhttp/jsonhttp.go +++ b/pkg/jsonhttp/jsonhttp.go @@ -44,7 +44,7 @@ type StatusResponse struct { } // Respond writes a JSON-encoded body to http.ResponseWriter. -func Respond(w http.ResponseWriter, statusCode int, response interface{}) { +func Respond(w http.ResponseWriter, statusCode int, response any) { if statusCode == 0 { statusCode = http.StatusOK } @@ -88,32 +88,32 @@ func Respond(w http.ResponseWriter, statusCode int, response interface{}) { } // Continue writes a response with status code 100. -func Continue(w http.ResponseWriter, response interface{}) { +func Continue(w http.ResponseWriter, response any) { Respond(w, http.StatusContinue, response) } // SwitchingProtocols writes a response with status code 101. -func SwitchingProtocols(w http.ResponseWriter, response interface{}) { +func SwitchingProtocols(w http.ResponseWriter, response any) { Respond(w, http.StatusSwitchingProtocols, response) } // OK writes a response with status code 200. 
-func OK(w http.ResponseWriter, response interface{}) { +func OK(w http.ResponseWriter, response any) { Respond(w, http.StatusOK, response) } // Created writes a response with status code 201. -func Created(w http.ResponseWriter, response interface{}) { +func Created(w http.ResponseWriter, response any) { Respond(w, http.StatusCreated, response) } // Accepted writes a response with status code 202. -func Accepted(w http.ResponseWriter, response interface{}) { +func Accepted(w http.ResponseWriter, response any) { Respond(w, http.StatusAccepted, response) } // NonAuthoritativeInfo writes a response with status code 203. -func NonAuthoritativeInfo(w http.ResponseWriter, response interface{}) { +func NonAuthoritativeInfo(w http.ResponseWriter, response any) { Respond(w, http.StatusNonAuthoritativeInfo, response) } @@ -125,206 +125,206 @@ func NoContent(w http.ResponseWriter) { } // ResetContent writes a response with status code 205. -func ResetContent(w http.ResponseWriter, response interface{}) { +func ResetContent(w http.ResponseWriter, response any) { Respond(w, http.StatusResetContent, response) } // PartialContent writes a response with status code 206. -func PartialContent(w http.ResponseWriter, response interface{}) { +func PartialContent(w http.ResponseWriter, response any) { Respond(w, http.StatusPartialContent, response) } // MultipleChoices writes a response with status code 300. -func MultipleChoices(w http.ResponseWriter, response interface{}) { +func MultipleChoices(w http.ResponseWriter, response any) { Respond(w, http.StatusMultipleChoices, response) } // MovedPermanently writes a response with status code 301. -func MovedPermanently(w http.ResponseWriter, response interface{}) { +func MovedPermanently(w http.ResponseWriter, response any) { Respond(w, http.StatusMovedPermanently, response) } // Found writes a response with status code 302. 
-func Found(w http.ResponseWriter, response interface{}) { +func Found(w http.ResponseWriter, response any) { Respond(w, http.StatusFound, response) } // SeeOther writes a response with status code 303. -func SeeOther(w http.ResponseWriter, response interface{}) { +func SeeOther(w http.ResponseWriter, response any) { Respond(w, http.StatusSeeOther, response) } // NotModified writes a response with status code 304. -func NotModified(w http.ResponseWriter, response interface{}) { +func NotModified(w http.ResponseWriter, response any) { Respond(w, http.StatusNotModified, response) } // UseProxy writes a response with status code 305. -func UseProxy(w http.ResponseWriter, response interface{}) { +func UseProxy(w http.ResponseWriter, response any) { Respond(w, http.StatusUseProxy, response) } // TemporaryRedirect writes a response with status code 307. -func TemporaryRedirect(w http.ResponseWriter, response interface{}) { +func TemporaryRedirect(w http.ResponseWriter, response any) { Respond(w, http.StatusTemporaryRedirect, response) } // PermanentRedirect writes a response with status code 308. -func PermanentRedirect(w http.ResponseWriter, response interface{}) { +func PermanentRedirect(w http.ResponseWriter, response any) { Respond(w, http.StatusPermanentRedirect, response) } // BadRequest writes a response with status code 400. -func BadRequest(w http.ResponseWriter, response interface{}) { +func BadRequest(w http.ResponseWriter, response any) { Respond(w, http.StatusBadRequest, response) } // Unauthorized writes a response with status code 401. -func Unauthorized(w http.ResponseWriter, response interface{}) { +func Unauthorized(w http.ResponseWriter, response any) { Respond(w, http.StatusUnauthorized, response) } // PaymentRequired writes a response with status code 402. 
-func PaymentRequired(w http.ResponseWriter, response interface{}) { +func PaymentRequired(w http.ResponseWriter, response any) { Respond(w, http.StatusPaymentRequired, response) } // Forbidden writes a response with status code 403. -func Forbidden(w http.ResponseWriter, response interface{}) { +func Forbidden(w http.ResponseWriter, response any) { Respond(w, http.StatusForbidden, response) } // NotFound writes a response with status code 404. -func NotFound(w http.ResponseWriter, response interface{}) { +func NotFound(w http.ResponseWriter, response any) { Respond(w, http.StatusNotFound, response) } // MethodNotAllowed writes a response with status code 405. -func MethodNotAllowed(w http.ResponseWriter, response interface{}) { +func MethodNotAllowed(w http.ResponseWriter, response any) { Respond(w, http.StatusMethodNotAllowed, response) } // NotAcceptable writes a response with status code 406. -func NotAcceptable(w http.ResponseWriter, response interface{}) { +func NotAcceptable(w http.ResponseWriter, response any) { Respond(w, http.StatusNotAcceptable, response) } // ProxyAuthRequired writes a response with status code 407. -func ProxyAuthRequired(w http.ResponseWriter, response interface{}) { +func ProxyAuthRequired(w http.ResponseWriter, response any) { Respond(w, http.StatusProxyAuthRequired, response) } // RequestTimeout writes a response with status code 408. -func RequestTimeout(w http.ResponseWriter, response interface{}) { +func RequestTimeout(w http.ResponseWriter, response any) { Respond(w, http.StatusRequestTimeout, response) } // Conflict writes a response with status code 409. -func Conflict(w http.ResponseWriter, response interface{}) { +func Conflict(w http.ResponseWriter, response any) { Respond(w, http.StatusConflict, response) } // Gone writes a response with status code 410. 
-func Gone(w http.ResponseWriter, response interface{}) { +func Gone(w http.ResponseWriter, response any) { Respond(w, http.StatusGone, response) } // LengthRequired writes a response with status code 411. -func LengthRequired(w http.ResponseWriter, response interface{}) { +func LengthRequired(w http.ResponseWriter, response any) { Respond(w, http.StatusLengthRequired, response) } // PreconditionFailed writes a response with status code 412. -func PreconditionFailed(w http.ResponseWriter, response interface{}) { +func PreconditionFailed(w http.ResponseWriter, response any) { Respond(w, http.StatusPreconditionFailed, response) } // RequestEntityTooLarge writes a response with status code 413. -func RequestEntityTooLarge(w http.ResponseWriter, response interface{}) { +func RequestEntityTooLarge(w http.ResponseWriter, response any) { Respond(w, http.StatusRequestEntityTooLarge, response) } // RequestURITooLong writes a response with status code 414. -func RequestURITooLong(w http.ResponseWriter, response interface{}) { +func RequestURITooLong(w http.ResponseWriter, response any) { Respond(w, http.StatusRequestURITooLong, response) } // UnsupportedMediaType writes a response with status code 415. -func UnsupportedMediaType(w http.ResponseWriter, response interface{}) { +func UnsupportedMediaType(w http.ResponseWriter, response any) { Respond(w, http.StatusUnsupportedMediaType, response) } // RequestedRangeNotSatisfiable writes a response with status code 416. -func RequestedRangeNotSatisfiable(w http.ResponseWriter, response interface{}) { +func RequestedRangeNotSatisfiable(w http.ResponseWriter, response any) { Respond(w, http.StatusRequestedRangeNotSatisfiable, response) } // ExpectationFailed writes a response with status code 417. -func ExpectationFailed(w http.ResponseWriter, response interface{}) { +func ExpectationFailed(w http.ResponseWriter, response any) { Respond(w, http.StatusExpectationFailed, response) } // Teapot writes a response with status code 418. 
-func Teapot(w http.ResponseWriter, response interface{}) { +func Teapot(w http.ResponseWriter, response any) { Respond(w, http.StatusTeapot, response) } // UnprocessableEntity writes a response with status code 422. -func UnprocessableEntity(w http.ResponseWriter, response interface{}) { +func UnprocessableEntity(w http.ResponseWriter, response any) { Respond(w, http.StatusUnprocessableEntity, response) } // UpgradeRequired writes a response with status code 426. -func UpgradeRequired(w http.ResponseWriter, response interface{}) { +func UpgradeRequired(w http.ResponseWriter, response any) { Respond(w, http.StatusUpgradeRequired, response) } // PreconditionRequired writes a response with status code 428. -func PreconditionRequired(w http.ResponseWriter, response interface{}) { +func PreconditionRequired(w http.ResponseWriter, response any) { Respond(w, http.StatusPreconditionRequired, response) } // TooManyRequests writes a response with status code 429. -func TooManyRequests(w http.ResponseWriter, response interface{}) { +func TooManyRequests(w http.ResponseWriter, response any) { Respond(w, http.StatusTooManyRequests, response) } // RequestHeaderFieldsTooLarge writes a response with status code 431. -func RequestHeaderFieldsTooLarge(w http.ResponseWriter, response interface{}) { +func RequestHeaderFieldsTooLarge(w http.ResponseWriter, response any) { Respond(w, http.StatusRequestHeaderFieldsTooLarge, response) } // UnavailableForLegalReasons writes a response with status code 451. -func UnavailableForLegalReasons(w http.ResponseWriter, response interface{}) { +func UnavailableForLegalReasons(w http.ResponseWriter, response any) { Respond(w, http.StatusUnavailableForLegalReasons, response) } // InternalServerError writes a response with status code 500. 
-func InternalServerError(w http.ResponseWriter, response interface{}) { +func InternalServerError(w http.ResponseWriter, response any) { Respond(w, http.StatusInternalServerError, response) } // NotImplemented writes a response with status code 501. -func NotImplemented(w http.ResponseWriter, response interface{}) { +func NotImplemented(w http.ResponseWriter, response any) { Respond(w, http.StatusNotImplemented, response) } // BadGateway writes a response with status code 502. -func BadGateway(w http.ResponseWriter, response interface{}) { +func BadGateway(w http.ResponseWriter, response any) { Respond(w, http.StatusBadGateway, response) } // ServiceUnavailable writes a response with status code 503. -func ServiceUnavailable(w http.ResponseWriter, response interface{}) { +func ServiceUnavailable(w http.ResponseWriter, response any) { Respond(w, http.StatusServiceUnavailable, response) } // GatewayTimeout writes a response with status code 504. -func GatewayTimeout(w http.ResponseWriter, response interface{}) { +func GatewayTimeout(w http.ResponseWriter, response any) { Respond(w, http.StatusGatewayTimeout, response) } // HTTPVersionNotSupported writes a response with status code 505. 
-func HTTPVersionNotSupported(w http.ResponseWriter, response interface{}) { +func HTTPVersionNotSupported(w http.ResponseWriter, response any) { Respond(w, http.StatusHTTPVersionNotSupported, response) } diff --git a/pkg/jsonhttp/jsonhttp_test.go b/pkg/jsonhttp/jsonhttp_test.go index 61a17684e5e..43b7b90b5ad 100644 --- a/pkg/jsonhttp/jsonhttp_test.go +++ b/pkg/jsonhttp/jsonhttp_test.go @@ -134,7 +134,7 @@ func TestRespond_special(t *testing.T) { for _, tc := range []struct { name string code int - response interface{} + response any wantMessage string }{ { @@ -245,7 +245,7 @@ func TestStandardHTTPResponds(t *testing.T) { t.Parallel() for _, tc := range []struct { - f func(w http.ResponseWriter, response interface{}) + f func(w http.ResponseWriter, response any) code int }{ {f: jsonhttp.Continue, code: http.StatusContinue}, diff --git a/pkg/jsonhttp/jsonhttptest/jsonhttptest.go b/pkg/jsonhttp/jsonhttptest/jsonhttptest.go index 7902a566dc9..9e64b0a8422 100644 --- a/pkg/jsonhttp/jsonhttptest/jsonhttptest.go +++ b/pkg/jsonhttp/jsonhttptest/jsonhttptest.go @@ -157,7 +157,7 @@ func WithRequestBody(body io.Reader) Option { // WithJSONRequestBody writes a request JSON-encoded body to the request made by // the Request function. -func WithJSONRequestBody(r interface{}) Option { +func WithJSONRequestBody(r any) Option { return optionFunc(func(o *options) error { b, err := json.Marshal(r) if err != nil { @@ -256,7 +256,7 @@ func WithNonEmptyResponseHeader(key string) Option { // WithExpectedJSONResponse validates that the response from the request in the // Request function matches JSON-encoded body provided here. 
-func WithExpectedJSONResponse(response interface{}) Option { +func WithExpectedJSONResponse(response any) Option { return optionFunc(func(o *options) error { o.expectedJSONResponse = response return nil @@ -265,7 +265,7 @@ func WithExpectedJSONResponse(response interface{}) Option { // WithUnmarshalJSONResponse unmarshals response body from the request in the // Request function to the provided response. Response must be a pointer. -func WithUnmarshalJSONResponse(response interface{}) Option { +func WithUnmarshalJSONResponse(response any) Option { return optionFunc(func(o *options) error { o.unmarshalResponse = response return nil @@ -304,8 +304,8 @@ type options struct { expectedResponseHeaders http.Header nonEmptyResponseHeaders []string expectedResponse []byte - expectedJSONResponse interface{} - unmarshalResponse interface{} + expectedJSONResponse any + unmarshalResponse any responseBody *[]byte noResponseBody bool } diff --git a/pkg/jsonhttp/jsonhttptest/testing_mock_test.go b/pkg/jsonhttp/jsonhttptest/testing_mock_test.go index c496f13d8ae..0d375a926c3 100644 --- a/pkg/jsonhttp/jsonhttptest/testing_mock_test.go +++ b/pkg/jsonhttp/jsonhttptest/testing_mock_test.go @@ -62,11 +62,11 @@ func (m *mock) Helper() { m.isHelper = true } -func (m *mock) Errorf(format string, args ...interface{}) { +func (m *mock) Errorf(format string, args ...any) { m.got.errors = append(m.got.errors, fmt.Sprintf(format, args...)) } -func (m *mock) Fatal(args ...interface{}) { +func (m *mock) Fatal(args ...any) { m.got.fatal = fmt.Sprint(args...) panic(errFailed) // terminate the goroutine to detect it in the assert function } diff --git a/pkg/log/formatter.go b/pkg/log/formatter.go index bf0a71a9a7e..558e445376a 100644 --- a/pkg/log/formatter.go +++ b/pkg/log/formatter.go @@ -45,12 +45,12 @@ type Marshaler interface { // with exported fields // // It may return any value of any type. 
- MarshalLog() interface{} + MarshalLog() any } // PseudoStruct is a list of key-value pairs that gets logged as a struct. // E.g.: PseudoStruct{"f1", 1, "f2", true, "f3", []int{}}. -type PseudoStruct []interface{} +type PseudoStruct []any // fmtOptions carries parameters which influence the way logs are generated/formatted. type fmtOptions struct { @@ -97,7 +97,7 @@ type formatter struct { // render produces a log line where the base is // never escaped; the opposite is true for args. -func (f *formatter) render(base, args []interface{}) []byte { +func (f *formatter) render(base, args []any) []byte { buf := bytes.NewBuffer(make([]byte, 0, 1024)) if f.opts.jsonOutput { buf.WriteByte('{') @@ -116,7 +116,7 @@ func (f *formatter) render(base, args []interface{}) []byte { // separator (which depends on the output format) before the first pair is // written. If escapeKeys is true, the keys are assumed to have // non-JSON-compatible characters in them and must be evaluated for escapes. -func (f *formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) { +func (f *formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { @@ -159,7 +159,7 @@ func (f *formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing // prettyWithFlags prettifies the given value. // TODO: This is not fast. Most of the overhead goes here. -func (f *formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { +func (f *formatter) prettyWithFlags(value any, flags uint32, depth int) string { const flagRawStruct = 0x1 // Do not print braces on structs. if depth > f.opts.maxLogDepth { @@ -387,12 +387,12 @@ func (f *formatter) caller() caller { } // nonStringKey converts non-string value v to string. 
-func (f *formatter) nonStringKey(v interface{}) string { +func (f *formatter) nonStringKey(v any) string { return fmt.Sprintf("", f.snippet(v)) } // snippet produces a short snippet string of an arbitrary value. -func (f *formatter) snippet(v interface{}) string { +func (f *formatter) snippet(v any) string { const snipLen = 16 snip := f.prettyWithFlags(v, 0, 0) @@ -405,7 +405,7 @@ func (f *formatter) snippet(v interface{}) string { // sanitize ensures that a list of key-value pairs has a value for every key // (adding a value if needed) and that each key is a string (substituting a key // if needed). -func (f *formatter) sanitize(kvList []interface{}) []interface{} { +func (f *formatter) sanitize(kvList []any) []any { if len(kvList)%2 != 0 { kvList = append(kvList, noValue) } @@ -466,7 +466,7 @@ func needsEscape(s string) bool { } // invokeMarshaler returns panic-safe output from the Marshaler.MarshalLog() method. -func invokeMarshaler(m Marshaler) (ret interface{}) { +func invokeMarshaler(m Marshaler) (ret any) { defer func() { if r := recover(); r != nil { ret = fmt.Sprintf("", r) diff --git a/pkg/log/formatter_test.go b/pkg/log/formatter_test.go index 7dd44b87104..c78cce38453 100644 --- a/pkg/log/formatter_test.go +++ b/pkg/log/formatter_test.go @@ -36,7 +36,7 @@ func (p pointErr) MarshalText() ([]byte, error) { // marshalerTest expect to result in the MarshalLog() value when logged. type marshalerTest struct{ val string } -func (marshalerTest) MarshalLog() interface{} { +func (marshalerTest) MarshalLog() any { return struct{ Inner string }{"I am a log.Marshaler"} } func (marshalerTest) String() string { @@ -50,7 +50,7 @@ func (marshalerTest) Error() string { // marshalerPanicTest expect this to result in a panic when logged. 
type marshalerPanicTest struct{ val string } -func (marshalerPanicTest) MarshalLog() interface{} { +func (marshalerPanicTest) MarshalLog() any { panic("marshalerPanicTest") } @@ -207,9 +207,9 @@ type ( Inner1Test `json:"inner1"` Inner2Test `json:"-"` Inner3Test `json:"-,"` - Inner4Test `json:"inner4,omitempty"` + Inner4Test `json:"inner4"` Inner5Test `json:","` - Inner6Test `json:"inner6,omitempty"` + Inner6Test `json:"inner6"` } ) @@ -218,7 +218,7 @@ func TestPretty(t *testing.T) { strPtr := func(s string) *string { return &s } testCases := []struct { - val interface{} + val any exp string // used in testCases where JSON can't handle it }{{ val: "strval", @@ -369,11 +369,11 @@ func TestPretty(t *testing.T) { val: struct { A *int B *int - C interface{} - D interface{} + C any + D any }{ B: intPtr(1), - D: interface{}(2), + D: any(2), }, }, { val: marshalerTest{"foobar"}, @@ -650,13 +650,13 @@ func TestPretty(t *testing.T) { } } -func makeKV(args ...interface{}) []interface{} { return args } +func makeKV(args ...any) []any { return args } func TestRender(t *testing.T) { testCases := []struct { name string - builtins []interface{} - args []interface{} + builtins []any + args []any wantKV string wantJSON string }{{ @@ -665,8 +665,8 @@ func TestRender(t *testing.T) { wantJSON: "{}", }, { name: "empty", - builtins: []interface{}{}, - args: []interface{}{}, + builtins: []any{}, + args: []any{}, wantKV: "", wantJSON: "{}", }, { @@ -739,12 +739,12 @@ func TestRender(t *testing.T) { func TestSanitize(t *testing.T) { testCases := []struct { name string - kv []interface{} - want []interface{} + kv []any + want []any }{{ name: "empty", - kv: []interface{}{}, - want: []interface{}{}, + kv: []any{}, + want: []any{}, }, { name: "already sane", kv: makeKV("int", 1, "str", "ABC", "bool", true), diff --git a/pkg/log/httpaccess/http_access.go b/pkg/log/httpaccess/http_access.go index 668b449813a..fbc350dba60 100644 --- a/pkg/log/httpaccess/http_access.go +++ 
b/pkg/log/httpaccess/http_access.go @@ -58,7 +58,7 @@ func NewHTTPAccessLogHandler(logger log.Logger, tracer *tracing.Tracer, message ip = r.RemoteAddr } - fields := []interface{}{ + fields := []any{ "ip", ip, "method", r.Method, "host", r.Host, diff --git a/pkg/log/log.go b/pkg/log/log.go index 5a517d02a33..0efa22c6127 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -116,7 +116,7 @@ type Builder interface { // WithValues specifies additional key/value pairs // to be logged with each log line. - WithValues(keysAndValues ...interface{}) Builder + WithValues(keysAndValues ...any) Builder // Build returns a new or existing Logger // instance, if such instance already exists. @@ -143,28 +143,28 @@ type Logger interface { // the log line. The key/value pairs can then be used to add additional // variable information. The key/value pairs must alternate string keys // and arbitrary values. - Debug(msg string, keysAndValues ...interface{}) + Debug(msg string, keysAndValues ...any) // Info logs an info message with the given key/value pairs as context. // The msg argument should be used to add some constant description to // the log line. The key/value pairs can then be used to add additional // variable information. The key/value pairs must alternate string keys // and arbitrary values. - Info(msg string, keysAndValues ...interface{}) + Info(msg string, keysAndValues ...any) // Warning logs a warning message with the given key/value pairs as context. // The msg argument should be used to add some constant description to // the log line. The key/value pairs can then be used to add additional // variable information. The key/value pairs must alternate string keys // and arbitrary values. - Warning(msg string, keysAndValues ...interface{}) + Warning(msg string, keysAndValues ...any) // Error logs an error, with the given message and key/value pairs as context. 
// The msg argument should be used to add context to any underlying error, // while the err argument should be used to attach the actual error that // triggered this log line, if present. The err parameter is optional // and nil may be passed instead of an error instance. - Error(err error, msg string, keysAndValues ...interface{}) + Error(err error, msg string, keysAndValues ...any) } // Lock wraps io.Writer in a mutex to make it safe for concurrent use. diff --git a/pkg/log/logger.go b/pkg/log/logger.go index f40ffd59916..bb81d327172 100644 --- a/pkg/log/logger.go +++ b/pkg/log/logger.go @@ -58,7 +58,7 @@ type builder struct { // values holds additional key/value pairs // that are included on every log call. - values []interface{} + values []any // valuesStr is a cache of render values slice, so // we don't have to render them on each Build call. @@ -83,7 +83,7 @@ func (b *builder) WithName(name string) Builder { } // WithValues implements the Builder interface WithValues method. -func (b *builder) WithValues(keysAndValues ...interface{}) Builder { +func (b *builder) WithValues(keysAndValues ...any) Builder { c := b.clone() c.values = append(c.values, keysAndValues...) return c @@ -133,7 +133,7 @@ func (b *builder) clone() *builder { c := *b c.cloned = true c.names = append(make([]string, 0, len(c.names)), c.names...) - c.values = append(make([]interface{}, 0, len(c.values)), c.values...) + c.values = append(make([]any, 0, len(c.values)), c.values...) return &c } @@ -176,7 +176,7 @@ func (l *logger) Verbosity() Level { } // Debug implements the Logger interface Debug method. 
-func (l *logger) Debug(msg string, keysAndValues ...interface{}) { +func (l *logger) Debug(msg string, keysAndValues ...any) { if int(l.verbosity.get()) >= int(l.v) { if err := l.log(VerbosityDebug, CategoryDebug, nil, msg, keysAndValues...); err != nil { fmt.Fprintln(os.Stderr, err) @@ -185,7 +185,7 @@ func (l *logger) Debug(msg string, keysAndValues ...interface{}) { } // Info implements the Logger interface Info method. -func (l *logger) Info(msg string, keysAndValues ...interface{}) { +func (l *logger) Info(msg string, keysAndValues ...any) { if l.verbosity.get() >= VerbosityInfo { if err := l.log(VerbosityInfo, CategoryInfo, nil, msg, keysAndValues...); err != nil { fmt.Fprintln(os.Stderr, err) @@ -194,7 +194,7 @@ func (l *logger) Info(msg string, keysAndValues ...interface{}) { } // Warning implements the Logger interface Warning method. -func (l *logger) Warning(msg string, keysAndValues ...interface{}) { +func (l *logger) Warning(msg string, keysAndValues ...any) { if l.verbosity.get() >= VerbosityWarning { if err := l.log(VerbosityWarning, CategoryWarning, nil, msg, keysAndValues...); err != nil { fmt.Fprintln(os.Stderr, err) @@ -203,7 +203,7 @@ func (l *logger) Warning(msg string, keysAndValues ...interface{}) { } // Error implements the Logger interface Error method. -func (l *logger) Error(err error, msg string, keysAndValues ...interface{}) { +func (l *logger) Error(err error, msg string, keysAndValues ...any) { if l.verbosity.get() >= VerbosityError { if err := l.log(VerbosityError, CategoryError, err, msg, keysAndValues...); err != nil { fmt.Fprintln(os.Stderr, err) @@ -218,8 +218,8 @@ func (l *logger) setVerbosity(v Level) { // log logs the given msg and key-value pairs with the given level // and the given message category caller (if enabled) to the sink. 
-func (l *logger) log(vl Level, mc MessageCategory, err error, msg string, keysAndValues ...interface{}) error { - base := make([]interface{}, 0, 14+len(keysAndValues)) +func (l *logger) log(vl Level, mc MessageCategory, err error, msg string, keysAndValues ...any) error { + base := make([]any, 0, 14+len(keysAndValues)) if l.formatter.opts.logTimestamp { base = append(base, "time", time.Now().Format(l.formatter.opts.timestampLayout)) } diff --git a/pkg/log/logger_test.go b/pkg/log/logger_test.go index f7c23308808..e3921b60a84 100644 --- a/pkg/log/logger_test.go +++ b/pkg/log/logger_test.go @@ -30,8 +30,8 @@ func (h *hook) Fire(Level) error { } // applyError is a higher order function that returns the given fn with an applied err. -func applyError(fn func(error, string, ...interface{}), err error) func(string, ...interface{}) { - return func(msg string, kvs ...interface{}) { +func applyError(fn func(error, string, ...any), err error) func(string, ...any) { + return func(msg string, kvs ...any) { fn(err, msg, kvs...) 
} } @@ -73,7 +73,7 @@ func TestLoggerOptionsLevelHooks(t *testing.T) { logger, _ := newLogger(WithLevelHooks(VerbosityNone, &have)) tests := []struct { - fn func(string, ...interface{}) + fn func(string, ...any) want bool }{{ fn: logger.Build().Debug, @@ -102,7 +102,7 @@ func TestLoggerOptionsLevelHooks(t *testing.T) { logger, _ := newLogger(WithLevelHooks(VerbosityDebug, &have)) tests := []struct { - fn func(string, ...interface{}) + fn func(string, ...any) want bool }{{ fn: logger.Build().Debug, @@ -131,7 +131,7 @@ func TestLoggerOptionsLevelHooks(t *testing.T) { logger, _ := newLogger(WithLevelHooks(VerbosityInfo, &have)) tests := []struct { - fn func(string, ...interface{}) + fn func(string, ...any) want bool }{{ fn: logger.Build().Debug, @@ -160,7 +160,7 @@ func TestLoggerOptionsLevelHooks(t *testing.T) { logger, _ := newLogger(WithLevelHooks(VerbosityWarning, &have)) tests := []struct { - fn func(string, ...interface{}) + fn func(string, ...any) want bool }{{ fn: logger.Build().Debug, @@ -189,7 +189,7 @@ func TestLoggerOptionsLevelHooks(t *testing.T) { logger, _ := newLogger(WithLevelHooks(VerbosityError, &have)) tests := []struct { - fn func(string, ...interface{}) + fn func(string, ...any) want bool }{{ fn: logger.Build().Debug, @@ -218,7 +218,7 @@ func TestLoggerOptionsLevelHooks(t *testing.T) { logger, _ := newLogger(WithLevelHooks(VerbosityAll, &have)) tests := []struct { - fn func(string, ...interface{}) + fn func(string, ...any) want bool }{{ fn: logger.Build().Debug, @@ -275,8 +275,8 @@ func TestLogger(t *testing.T) { testCases := []struct { name string - logFn func(string, ...interface{}) - args []interface{} + logFn func(string, ...any) + args []any want string }{{ name: "just msg", @@ -626,8 +626,8 @@ func TestLoggerWithName(t *testing.T) { testCases := []struct { name string - logFn func(string, ...interface{}) - args []interface{} + logFn func(string, ...any) + args []any want string }{{ name: "one", @@ -694,8 +694,8 @@ func 
TestLoggerWithValues(t *testing.T) { testCases := []struct { name string - logFn func(string, ...interface{}) - args []interface{} + logFn func(string, ...any) + args []any want string }{{ name: "zero", diff --git a/pkg/log/noop_logger.go b/pkg/log/noop_logger.go index 590a717d722..96fbddefd34 100644 --- a/pkg/log/noop_logger.go +++ b/pkg/log/noop_logger.go @@ -9,14 +9,14 @@ var Noop Logger = new(noopLogger) type noopLogger struct{} -func (nl *noopLogger) V(_ uint) Builder { return nl } -func (nl *noopLogger) WithName(_ string) Builder { return nl } -func (nl *noopLogger) WithValues(_ ...interface{}) Builder { return nl } -func (nl *noopLogger) Build() Logger { return nl } -func (nl *noopLogger) Register() Logger { return nl } +func (nl *noopLogger) V(_ uint) Builder { return nl } +func (nl *noopLogger) WithName(_ string) Builder { return nl } +func (nl *noopLogger) WithValues(_ ...any) Builder { return nl } +func (nl *noopLogger) Build() Logger { return nl } +func (nl *noopLogger) Register() Logger { return nl } -func (nl *noopLogger) Verbosity() Level { return VerbosityNone } -func (nl *noopLogger) Debug(_ string, _ ...interface{}) {} -func (nl *noopLogger) Info(_ string, _ ...interface{}) {} -func (nl *noopLogger) Warning(_ string, _ ...interface{}) {} -func (nl *noopLogger) Error(_ error, _ string, _ ...interface{}) {} +func (nl *noopLogger) Verbosity() Level { return VerbosityNone } +func (nl *noopLogger) Debug(_ string, _ ...any) {} +func (nl *noopLogger) Info(_ string, _ ...any) {} +func (nl *noopLogger) Warning(_ string, _ ...any) {} +func (nl *noopLogger) Error(_ error, _ string, _ ...any) {} diff --git a/pkg/log/registry.go b/pkg/log/registry.go index a467cfc39cf..bd7fa3587c6 100644 --- a/pkg/log/registry.go +++ b/pkg/log/registry.go @@ -124,7 +124,7 @@ func SetVerbosityByExp(e string, v Level) error { } var merr *multierror.Error - loggers.Range(func(key, val interface{}) bool { + loggers.Range(func(key, val any) bool { if rex.MatchString(key.(string)) { 
merr = multierror.Append(merr, SetVerbosity(val.(*logger), v)) } @@ -135,7 +135,7 @@ func SetVerbosityByExp(e string, v Level) error { // RegistryIterate iterates through all registered loggers. func RegistryIterate(fn func(id, path string, verbosity Level, v uint) (next bool)) { - loggers.Range(func(_, val interface{}) bool { + loggers.Range(func(_, val any) bool { l := val.(*logger) return fn(l.id, l.namesStr, l.verbosity.get(), l.v) }) diff --git a/pkg/log/registry_test.go b/pkg/log/registry_test.go index dbfb1410d84..e83ac0bc588 100644 --- a/pkg/log/registry_test.go +++ b/pkg/log/registry_test.go @@ -75,10 +75,10 @@ func TestNewLogger(t *testing.T) { var ( cnt int - val interface{} + val any ) NewLogger("root").Register() - loggers.Range(func(k, v interface{}) bool { + loggers.Range(func(k, v any) bool { cnt++ val = v return true @@ -119,7 +119,7 @@ func TestSetVerbosity(t *testing.T) { NewLogger("root").WithName("child1").WithValues("abc", 123).Register() registered := make(map[string]*logger) - loggers.Range(func(k, v interface{}) bool { + loggers.Range(func(k, v any) bool { registered[k.(string)] = v.(*logger) return true }) @@ -185,7 +185,7 @@ func TestRegistryRange(t *testing.T) { NewLogger("root").WithName("child1").WithValues("abc", 123).Register() registered := make(map[string]*logger) - loggers.Range(func(k, v interface{}) bool { + loggers.Range(func(k, v any) bool { registered[k.(string)] = v.(*logger) return true }) diff --git a/pkg/manifest/mantaray/marshal.go b/pkg/manifest/mantaray/marshal.go index f5f7edabc8f..6d355721c1c 100644 --- a/pkg/manifest/mantaray/marshal.go +++ b/pkg/manifest/mantaray/marshal.go @@ -447,7 +447,7 @@ func nodeRefBytes(f *fork) []byte { func encryptDecrypt(input, key []byte) []byte { output := make([]byte, len(input)) - for i := 0; i < len(input); i++ { + for i := range input { output[i] = input[i] ^ key[i%len(key)] } diff --git a/pkg/manifest/mantaray/marshal_test.go b/pkg/manifest/mantaray/marshal_test.go index 
e0d728602fa..8d0cb827b84 100644 --- a/pkg/manifest/mantaray/marshal_test.go +++ b/pkg/manifest/mantaray/marshal_test.go @@ -177,7 +177,7 @@ func TestMarshal(t *testing.T) { i++ return b } - for i := 0; i < len(testEntries); i++ { + for i := range testEntries { c := testEntries[i].Path e := testEntries[i].Entry if len(e) == 0 { diff --git a/pkg/manifest/mantaray/node_test.go b/pkg/manifest/mantaray/node_test.go index b9a8da787d7..80b0ed4963b 100644 --- a/pkg/manifest/mantaray/node_test.go +++ b/pkg/manifest/mantaray/node_test.go @@ -41,14 +41,14 @@ func TestAddAndLookup(t *testing.T) { []byte("aa"), []byte("b"), } - for i := 0; i < len(testCases); i++ { + for i := range testCases { c := testCases[i] e := append(make([]byte, 32-len(c)), c...) err := n.Add(ctx, c, e, nil, nil) if err != nil { t.Fatalf("expected no error, got %v", err) } - for j := 0; j < i; j++ { + for j := range i { d := testCases[j] m, err := n.Lookup(ctx, d, nil) if err != nil { diff --git a/pkg/manifest/mantaray/persist_test.go b/pkg/manifest/mantaray/persist_test.go index e0eb86e6b09..619989d3803 100644 --- a/pkg/manifest/mantaray/persist_test.go +++ b/pkg/manifest/mantaray/persist_test.go @@ -32,7 +32,7 @@ func TestPersistIdempotence(t *testing.T) { } ctx := context.Background() var ls mantaray.LoadSaver = newMockLoadSaver() - for i := 0; i < len(paths); i++ { + for i := range paths { c := paths[i] err := n.Save(ctx, ls) if err != nil { @@ -49,7 +49,7 @@ func TestPersistIdempotence(t *testing.T) { if err != nil { t.Fatalf("expected no error, got %v", err) } - for i := 0; i < len(paths); i++ { + for i := range paths { c := paths[i] m, err := n.Lookup(ctx, c, ls) if err != nil { diff --git a/pkg/manifest/mantaray/walker_test.go b/pkg/manifest/mantaray/walker_test.go index f6d72687a39..76c23f96e69 100644 --- a/pkg/manifest/mantaray/walker_test.go +++ b/pkg/manifest/mantaray/walker_test.go @@ -55,7 +55,7 @@ func TestWalkNode(t *testing.T) { n := mantaray.New() - for i := 0; i < len(toAdd); i++ { + 
for i := range toAdd { c := toAdd[i] e := append(make([]byte, 32-len(c)), c...) err := n.Add(ctx, c, e, nil, nil) diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index cbe2ae20c58..64ceef0c51e 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -18,7 +18,7 @@ type Collector interface { Metrics() []prometheus.Collector } -func PrometheusCollectorsFromFields(i interface{}) (cs []prometheus.Collector) { +func PrometheusCollectorsFromFields(i any) (cs []prometheus.Collector) { v := reflect.Indirect(reflect.ValueOf(i)) for i := 0; i < v.NumField(); i++ { if !v.Field(i).CanInterface() { diff --git a/pkg/node/bootstrap.go b/pkg/node/bootstrap.go index 4ce709b16d9..047c306aaf2 100644 --- a/pkg/node/bootstrap.go +++ b/pkg/node/bootstrap.go @@ -204,7 +204,7 @@ func bootstrapNode( eventsJSON []byte ) - for i := 0; i < getSnapshotRetries; i++ { + for range getSnapshotRetries { if err != nil { time.Sleep(retryWait) } @@ -223,7 +223,7 @@ func bootstrapNode( return nil, err } - for i := 0; i < getSnapshotRetries; i++ { + for range getSnapshotRetries { if err != nil { time.Sleep(retryWait) } diff --git a/pkg/p2p/libp2p/connections_test.go b/pkg/p2p/libp2p/connections_test.go index 51227ab1555..93d8ab41740 100644 --- a/pkg/p2p/libp2p/connections_test.go +++ b/pkg/p2p/libp2p/connections_test.go @@ -126,7 +126,7 @@ func TestLightPeerLimit(t *testing.T) { addr := serviceUnderlayAddress(t, sf) - for i := 0; i < 5; i++ { + for range 5 { sl, _ := newService(t, 1, libp2pServiceOpts{ notifier: notifier, libp2pOpts: libp2p.Options{ @@ -240,7 +240,7 @@ func TestStreamsMaxIncomingLimit(t *testing.T) { // close random streams to validate new streams creation random := rand.New(rand.NewSource(time.Now().UnixNano())) - for i := 0; i < closeStreamCount; i++ { + for range closeStreamCount { n := random.Intn(len(streams)) if err := streams[n].Reset(); err != nil { t.Error(err) @@ -840,7 +840,7 @@ func TestTopologyAnnounce(t *testing.T) { expectPeersEventually(t, s1, 
overlay3) called := false - for i := 0; i < 20; i++ { + for range 20 { mtx.Lock() called = announceCalled mtx.Unlock() @@ -852,7 +852,7 @@ func TestTopologyAnnounce(t *testing.T) { if !called { t.Error("expected announce to be called") } - for i := 0; i < 10; i++ { + for range 10 { mtx.Lock() called = announceToCalled mtx.Unlock() @@ -878,7 +878,7 @@ func TestTopologyAnnounce(t *testing.T) { expectPeers(t, s2, overlay1) expectPeersEventually(t, s1, overlay2, overlay3) - for i := 0; i < 20; i++ { + for range 20 { mtx.Lock() called = announceToCalled mtx.Unlock() diff --git a/pkg/p2p/libp2p/libp2p.go b/pkg/p2p/libp2p/libp2p.go index 8020b381708..e1091e0b637 100644 --- a/pkg/p2p/libp2p/libp2p.go +++ b/pkg/p2p/libp2p/libp2p.go @@ -367,7 +367,7 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay } func (s *Service) reachabilityWorker() error { - sub, err := s.host.EventBus().Subscribe([]interface{}{new(event.EvtLocalReachabilityChanged)}) + sub, err := s.host.EventBus().Subscribe([]any{new(event.EvtLocalReachabilityChanged)}) if err != nil { return fmt.Errorf("failed subscribing to reachability event %w", err) } @@ -1060,7 +1060,7 @@ func (s *Service) peerUserAgent(ctx context.Context, peerID libp2ppeer.ID) strin ctx, cancel := context.WithTimeout(ctx, peerUserAgentTimeout) defer cancel() var ( - v interface{} + v any err error ) // Peerstore may not contain all keys and values right after the connections is created. 
diff --git a/pkg/p2p/protobuf/protobuf_test.go b/pkg/p2p/protobuf/protobuf_test.go index 3a1edf30bf5..f70e3754161 100644 --- a/pkg/p2p/protobuf/protobuf_test.go +++ b/pkg/p2p/protobuf/protobuf_test.go @@ -49,7 +49,7 @@ func TestReader_ReadMsg(t *testing.T) { r := tc.readerFunc() var msg pb.Message - for i := 0; i < len(messages); i++ { + for i := range messages { err := r.ReadMsg(&msg) if i == len(messages) { if !errors.Is(err, io.EOF) { @@ -105,7 +105,7 @@ func TestReader_timeout(t *testing.T) { r := tc.readerFunc() var msg pb.Message - for i := 0; i < len(messages); i++ { + for i := range messages { var timeout time.Duration if i == 0 { timeout = 1000 * time.Millisecond diff --git a/pkg/p2p/streamtest/streamtest.go b/pkg/p2p/streamtest/streamtest.go index ae312624149..92e538d83bd 100644 --- a/pkg/p2p/streamtest/streamtest.go +++ b/pkg/p2p/streamtest/streamtest.go @@ -348,10 +348,7 @@ func (r *record) Read(p []byte) (n int, err error) { r.lock.Lock() defer r.lock.Unlock() - end := r.c + len(p) - if end > len(r.b) { - end = len(r.b) - } + end := min(r.c+len(p), len(r.b)) n = copy(p, r.b[r.c:end]) r.c += n diff --git a/pkg/p2p/streamtest/streamtest_test.go b/pkg/p2p/streamtest/streamtest_test.go index 0b26d2da7ce..7dd1ae02091 100644 --- a/pkg/p2p/streamtest/streamtest_test.go +++ b/pkg/p2p/streamtest/streamtest_test.go @@ -813,7 +813,7 @@ func testRecords(t *testing.T, records []*streamtest.Record, want [][2]string, w t.Fatalf("got %v records, want %v", lr, lw) } - for i := 0; i < lr; i++ { + for i := range lr { record := records[i] if err := record.Err(); !errors.Is(err, wantErr) { diff --git a/pkg/postage/batchstore/mock/store_test.go b/pkg/postage/batchstore/mock/store_test.go index d376c359819..8100da0dac7 100644 --- a/pkg/postage/batchstore/mock/store_test.go +++ b/pkg/postage/batchstore/mock/store_test.go @@ -32,7 +32,7 @@ func TestBatchStore(t *testing.T) { } // Update should return error after a number of tries: - for i := 0; i < testCnt; i++ { + for range 
testCnt { if err := batchStore.Update(testBatch, big.NewInt(0), 0); err != nil { t.Fatal(err) } @@ -45,7 +45,7 @@ func TestBatchStore(t *testing.T) { if _, err := batchStore.Get(postagetesting.MustNewID()); err == nil { t.Fatal("expected error") } - for i := 0; i < testCnt-1; i++ { + for range testCnt - 1 { if _, err := batchStore.Get(testBatch.ID); err != nil { t.Fatal(err) } @@ -67,7 +67,7 @@ func TestBatchStorePutChainState(t *testing.T) { ) // PutChainState should return an error after a number of tries: - for i := 0; i < testCnt; i++ { + for range testCnt { if err := batchStore.PutChainState(testChainState); err != nil { t.Fatal(err) } diff --git a/pkg/postage/batchstore/store_test.go b/pkg/postage/batchstore/store_test.go index 603dd24f398..975c93f3e18 100644 --- a/pkg/postage/batchstore/store_test.go +++ b/pkg/postage/batchstore/store_test.go @@ -602,7 +602,7 @@ func addBatch(t *testing.T, s postage.Storer, depth uint8, value int) *postage.B return batch } -func stateStoreGet(t *testing.T, st storage.StateStorer, k string, v interface{}) { +func stateStoreGet(t *testing.T, st storage.StateStorer, k string, v any) { t.Helper() if err := st.Get(k, v); err != nil { @@ -610,7 +610,7 @@ func stateStoreGet(t *testing.T, st storage.StateStorer, k string, v interface{} } } -func stateStorePut(t *testing.T, st storage.StateStorer, k string, v interface{}) { +func stateStorePut(t *testing.T, st storage.StateStorer, k string, v any) { t.Helper() if err := st.Put(k, v); err != nil { diff --git a/pkg/postage/listener/listener_test.go b/pkg/postage/listener/listener_test.go index e3ee957a3e8..bd15c2659ab 100644 --- a/pkg/postage/listener/listener_test.go +++ b/pkg/postage/listener/listener_test.go @@ -536,19 +536,19 @@ func TestListenerBatchState(t *testing.T) { func newEventUpdaterMock() *updater { return &updater{ - eventC: make(chan interface{}, 1), + eventC: make(chan any, 1), } } func newEventUpdaterMockWithBlockNumberUpdateError(err error) *updater { return 
&updater{ - eventC: make(chan interface{}, 1), + eventC: make(chan any, 1), blockNumberUpdateError: err, } } type updater struct { - eventC chan interface{} + eventC chan any blockNumberUpdateError error } diff --git a/pkg/postage/service_test.go b/pkg/postage/service_test.go index 1ae9dfd1b46..1237fecc066 100644 --- a/pkg/postage/service_test.go +++ b/pkg/postage/service_test.go @@ -36,7 +36,7 @@ func TestSaveLoad(t *testing.T) { if err != nil { t.Fatal(err) } - for i := 0; i < 16; i++ { + for range 16 { err := ps.Add(newTestStampIssuer(t, 1000)) if err != nil { t.Fatal(err) diff --git a/pkg/postage/stamper_test.go b/pkg/postage/stamper_test.go index 383c534a0c4..f362ee8e23c 100644 --- a/pkg/postage/stamper_test.go +++ b/pkg/postage/stamper_test.go @@ -72,7 +72,7 @@ func TestStamperStamping(t *testing.T) { chunkAddr, _ := createStamp(t, stamper) // issue another 15 // collision depth is 8, committed batch depth is 12, bucket volume 2^4 - for i := 0; i < 14; i++ { + for i := range 14 { randAddr := swarm.RandAddressAt(t, chunkAddr, 8) _, err = stamper.Stamp(randAddr, randAddr) if err != nil { @@ -98,7 +98,7 @@ func TestStamperStamping(t *testing.T) { chunkAddr, _ := createStamp(t, stamper) // issue another 15 // collision depth is 8, committed batch depth is 12, bucket volume 2^4 - for i := 0; i < 15; i++ { + for i := range 15 { randAddr := swarm.RandAddressAt(t, chunkAddr, 8) _, err = stamper.Stamp(randAddr, randAddr) if err != nil { diff --git a/pkg/postage/stampissuer_test.go b/pkg/postage/stampissuer_test.go index 5e7bfa71cbc..05c177360a7 100644 --- a/pkg/postage/stampissuer_test.go +++ b/pkg/postage/stampissuer_test.go @@ -176,7 +176,7 @@ func Test_StampIssuer_inc(t *testing.T) { count := sti.BucketUpperBound() // Increment to upper bound (fill bucket to max cap) - for i := uint32(0); i < count; i++ { + for range count { _, _, err := sti.Increment(addr) if err != nil { t.Fatal(err) @@ -184,7 +184,7 @@ func Test_StampIssuer_inc(t *testing.T) { } // Incrementing 
stamp issuer above upper bound should return index starting from 0 - for i := uint32(0); i < count; i++ { + for i := range count { idxb, _, err := sti.Increment(addr) if err != nil { t.Fatal(err) @@ -203,7 +203,7 @@ func Test_StampIssuer_inc(t *testing.T) { count := sti.BucketUpperBound() // Increment to upper bound (fill bucket to max cap) - for i := uint32(0); i < count; i++ { + for range count { _, _, err := sti.Increment(addr) if err != nil { t.Fatal(err) @@ -211,7 +211,7 @@ func Test_StampIssuer_inc(t *testing.T) { } // Incrementing stamp issuer above upper bound should return error - for i := uint32(0); i < count; i++ { + for range count { _, _, err := sti.Increment(addr) if !errors.Is(err, postage.ErrBucketFull) { t.Fatal("bucket should be full") @@ -230,7 +230,7 @@ func TestUtilization(t *testing.T) { var eg errgroup.Group - for i := 0; i < 8; i++ { + for range 8 { eg.Go(func() error { for { _, _, err := sti.Increment(swarm.RandAddress(t)) diff --git a/pkg/pss/mining_test.go b/pkg/pss/mining_test.go index 1cb4cc03282..94e469bf6ca 100644 --- a/pkg/pss/mining_test.go +++ b/pkg/pss/mining_test.go @@ -16,7 +16,7 @@ import ( func newTargets(length, depth int) pss.Targets { targets := make([]pss.Target, length) - for i := 0; i < length; i++ { + for i := range length { buf := make([]byte, 8) binary.LittleEndian.PutUint64(buf, uint64(i)) targets[i] = pss.Target(buf[:depth]) diff --git a/pkg/pss/pss.go b/pkg/pss/pss.go index 454f5960129..28319e9a615 100644 --- a/pkg/pss/pss.go +++ b/pkg/pss/pss.go @@ -130,7 +130,7 @@ func (p *pss) Register(topic Topic, handler Handler) (cleanup func()) { defer p.handlersMu.Unlock() h := p.handlers[topic] - for i := 0; i < len(h); i++ { + for i := range h { if h[i] == &handler { p.handlers[topic] = append(h[:i], h[i+1:]...) 
return diff --git a/pkg/pss/pss_test.go b/pkg/pss/pss_test.go index 685adb59fb9..2482f9b2c7b 100644 --- a/pkg/pss/pss_test.go +++ b/pkg/pss/pss_test.go @@ -219,7 +219,7 @@ func TestRegister(t *testing.T) { func waitHandlerCallback(t *testing.T, msgChan *chan struct{}, count int) { t.Helper() - for received := 0; received < count; received++ { + for range count { select { case <-*msgChan: case <-time.After(1 * time.Second): diff --git a/pkg/pss/trojan.go b/pkg/pss/trojan.go index b6b404f2389..8316ae6932e 100644 --- a/pkg/pss/trojan.go +++ b/pkg/pss/trojan.go @@ -215,7 +215,7 @@ func mine(ctx context.Context, odd bool, f func(nonce []byte) (swarm.Chunk, erro defer cancel() eg, ctx := errgroup.WithContext(ctx) result := make(chan swarm.Chunk, 8) - for i := 0; i < 8; i++ { + for range 8 { eg.Go(func() error { nonce := make([]byte, 32) copy(nonce, initnonce) diff --git a/pkg/puller/puller.go b/pkg/puller/puller.go index 2fdbc24e9cf..d53a45348e4 100644 --- a/pkg/puller/puller.go +++ b/pkg/puller/puller.go @@ -268,7 +268,7 @@ func (p *Puller) syncPeer(ctx context.Context, peer *syncPeer, storageRadius uin if peer.po >= storageRadius { // cancel all bins lower than the storage radius - for bin := uint8(0); bin < storageRadius; bin++ { + for bin := range storageRadius { peer.cancelBin(bin) } diff --git a/pkg/pullsync/pullsync_test.go b/pkg/pullsync/pullsync_test.go index fc80bae137f..d77b130936e 100644 --- a/pkg/pullsync/pullsync_test.go +++ b/pkg/pullsync/pullsync_test.go @@ -44,7 +44,7 @@ func init() { chunks = make([]swarm.Chunk, n) addrs = make([]swarm.Address, n) results = make([]*storer.BinC, n) - for i := 0; i < n; i++ { + for i := range n { chunks[i] = testingc.GenerateTestRandomChunk() addrs[i] = chunks[i].Address() stampHash, _ := chunks[i].Stamp().Hash() diff --git a/pkg/replicas/getter.go b/pkg/replicas/getter.go index 5dc42cb7c82..b08b5c780e8 100644 --- a/pkg/replicas/getter.go +++ b/pkg/replicas/getter.go @@ -60,9 +60,7 @@ func (g *getter) Get(ctx 
context.Context, addr swarm.Address) (ch swarm.Chunk, e errcnt := 0 // concurrently call to retrieve chunk using original CAC address - g.wg.Add(1) - go func() { - defer g.wg.Done() + g.wg.Go(func() { ch, err := g.Getter.Get(ctx, addr) if err != nil { errc <- err @@ -73,7 +71,7 @@ func (g *getter) Get(ctx context.Context, addr swarm.Address) (ch swarm.Chunk, e case resultC <- ch: case <-ctx.Done(): } - }() + }) // counters n := 0 // counts the replica addresses tried target := 2 // the number of replicas attempted to download in this batch @@ -117,9 +115,7 @@ func (g *getter) Get(ctx context.Context, addr swarm.Address) (ch swarm.Chunk, e continue } - g.wg.Add(1) - go func() { - defer g.wg.Done() + g.wg.Go(func() { ch, err := g.Getter.Get(ctx, swarm.NewAddress(so.addr)) if err != nil { errc <- err @@ -136,7 +132,7 @@ func (g *getter) Get(ctx context.Context, addr swarm.Address) (ch swarm.Chunk, e case resultC <- soc.WrappedChunk(): case <-ctx.Done(): } - }() + }) n++ if n < target { continue diff --git a/pkg/replicas/putter.go b/pkg/replicas/putter.go index 8e23059e1cf..7614dee56d0 100644 --- a/pkg/replicas/putter.go +++ b/pkg/replicas/putter.go @@ -43,15 +43,13 @@ func (p *putter) Put(ctx context.Context, ch swarm.Chunk) (err error) { errc := make(chan error, p.rLevel.GetReplicaCount()) wg := sync.WaitGroup{} for r := range rr.c { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { sch, err := soc.New(r.id, ch).Sign(signer) if err == nil { err = p.putter.Put(ctx, sch) } errc <- err - }() + }) } wg.Wait() diff --git a/pkg/salud/salud.go b/pkg/salud/salud.go index 28fe648d030..1cf4ffa8373 100644 --- a/pkg/salud/salud.go +++ b/pkg/salud/salud.go @@ -143,9 +143,7 @@ func (s *service) salud(mode string, durPercentile float64, connsPercentile floa ) err := s.topology.EachConnectedPeer(func(addr swarm.Address, bin uint8) (stop bool, jumpToNext bool, err error) { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { ctx, cancel := 
context.WithTimeout(context.Background(), requestTimeout) defer cancel() @@ -172,7 +170,7 @@ func (s *service) salud(mode string, durPercentile float64, connsPercentile floa neighborhoodTotalDur += dur.Seconds() } mtx.Unlock() - }() + }) return false, false, nil }, topology.Select{}) if err != nil { diff --git a/pkg/sharky/recovery.go b/pkg/sharky/recovery.go index 2b19ee93935..af064fda60a 100644 --- a/pkg/sharky/recovery.go +++ b/pkg/sharky/recovery.go @@ -30,7 +30,7 @@ func NewRecovery(dir string, shardCnt int, datasize int) (*Recovery, error) { shards := make([]*slots, shardCnt) shardFiles := make([]*os.File, shardCnt) - for i := 0; i < shardCnt; i++ { + for i := range shardCnt { file, err := os.OpenFile(path.Join(dir, fmt.Sprintf("shard_%03d", i)), os.O_RDWR, 0666) if errors.Is(err, fs.ErrNotExist) { return nil, fmt.Errorf("index %d: %w", i, ErrShardNotFound) diff --git a/pkg/sharky/recovery_test.go b/pkg/sharky/recovery_test.go index 1cc7ae3d55e..73571c5811e 100644 --- a/pkg/sharky/recovery_test.go +++ b/pkg/sharky/recovery_test.go @@ -114,7 +114,7 @@ func TestRecovery(t *testing.T) { defer cancel() runs := 96 - for i := 0; i < runs; i++ { + for range runs { loc, err := s.Write(cctx, payload) if err != nil { if errors.Is(err, context.DeadlineExceeded) { diff --git a/pkg/sharky/shard.go b/pkg/sharky/shard.go index 5073d63bd87..c9047c979a2 100644 --- a/pkg/sharky/shard.go +++ b/pkg/sharky/shard.go @@ -102,11 +102,9 @@ func (sh *shard) process() { defer func() { // this condition checks if an slot is in limbo (popped but not used for write op) if writes != nil { - sh.slots.limboWG.Add(1) - go func() { - defer sh.slots.limboWG.Done() + sh.slots.limboWG.Go(func() { sh.slots.in <- slot - }() + }) } }() free := sh.slots.out diff --git a/pkg/sharky/shard_slots_test.go b/pkg/sharky/shard_slots_test.go index 0a1ab02cf7e..7738a1595aa 100644 --- a/pkg/sharky/shard_slots_test.go +++ b/pkg/sharky/shard_slots_test.go @@ -156,18 +156,14 @@ func newShard(t *testing.T) *shard { 
terminated := make(chan struct{}) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { shard.process() close(terminated) - }() + }) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { slots.process(terminated) - }() + }) return shard } diff --git a/pkg/sharky/sharky_test.go b/pkg/sharky/sharky_test.go index 268a996e7c0..1d4b78d51c4 100644 --- a/pkg/sharky/sharky_test.go +++ b/pkg/sharky/sharky_test.go @@ -185,11 +185,11 @@ func TestConcurrency(t *testing.T) { ctx := context.Background() eg, ectx := errgroup.WithContext(ctx) // a number of workers write sequential numbers to sharky - for k := 0; k < workers; k++ { + for k := range workers { eg.Go(func() error { <-start buf := make([]byte, 4) - for i := 0; i < limit; i++ { + for i := range limit { j := i*workers + k binary.BigEndian.PutUint32(buf, uint32(j)) loc, err := s.Write(ctx, buf) @@ -212,7 +212,7 @@ func TestConcurrency(t *testing.T) { eg.Go(func() error { <-start buf := make([]byte, datasize) - for i := 0; i < limit; i++ { + for range limit { select { case <-ectx.Done(): return ectx.Err() diff --git a/pkg/sharky/slots.go b/pkg/sharky/slots.go index 1f5294f79b6..71d058f2b88 100644 --- a/pkg/sharky/slots.go +++ b/pkg/sharky/slots.go @@ -58,7 +58,7 @@ func (sl *slots) save() error { // extensions are bytewise: can only be multiples of 8 bits func (sl *slots) extend(n int) { sl.size += uint32(n) * 8 - for i := 0; i < n; i++ { + for range n { sl.data = append(sl.data, 0xff) } } @@ -123,12 +123,10 @@ func (sl *slots) process(quit chan struct{}) { out = nil } quit = nil - sl.wg.Add(1) - go func() { - defer sl.wg.Done() + sl.wg.Go(func() { sl.limboWG.Wait() close(sl.in) - }() + }) } } } diff --git a/pkg/sharky/store.go b/pkg/sharky/store.go index 59549ee4a4a..d8947f9d042 100644 --- a/pkg/sharky/store.go +++ b/pkg/sharky/store.go @@ -100,17 +100,13 @@ func (s *Store) create(index uint8, maxDataSize int, basedir fs.FS) (*shard, err quit: s.quit, } terminated := make(chan struct{}) - sh.slots.wg.Add(1) 
- go func() { - defer sh.slots.wg.Done() + s.wg.Go(func() { sh.process() close(terminated) - }() - sh.slots.wg.Add(1) - go func() { - defer sh.slots.wg.Done() + }) + s.wg.Go(func() { sl.process(terminated) - }() + }) return sh, nil } diff --git a/pkg/shed/example_store_test.go b/pkg/shed/example_store_test.go index 6d77ce907a7..a6adbb2c703 100644 --- a/pkg/shed/example_store_test.go +++ b/pkg/shed/example_store_test.go @@ -238,7 +238,7 @@ func (s *Store) CollectGarbage() (err error) { maxRounds := 10 // arbitrary number, needs to be calculated // Run a few gc rounds. - for roundCount := 0; roundCount < maxRounds; roundCount++ { + for range maxRounds { var garbageCount int // New batch for a new cg round. trash := new(leveldb.Batch) diff --git a/pkg/shed/field_struct.go b/pkg/shed/field_struct.go index 4f909c17b21..9e6f7139800 100644 --- a/pkg/shed/field_struct.go +++ b/pkg/shed/field_struct.go @@ -45,7 +45,7 @@ func (db *DB) NewStructField(name string) (f StructField, err error) { // Get unmarshals data from the database to a provided val. // If the data is not found leveldb.ErrNotFound is returned. -func (f StructField) Get(val interface{}) (err error) { +func (f StructField) Get(val any) (err error) { b, err := f.db.Get(f.key) if err != nil { return err @@ -54,7 +54,7 @@ func (f StructField) Get(val interface{}) (err error) { } // Put marshals provided val and saves it to the database. -func (f StructField) Put(val interface{}) (err error) { +func (f StructField) Put(val any) (err error) { b, err := json.Marshal(val) if err != nil { return err @@ -63,7 +63,7 @@ func (f StructField) Put(val interface{}) (err error) { } // PutInBatch marshals provided val and puts it into the batch. 
-func (f StructField) PutInBatch(batch *leveldb.Batch, val interface{}) (err error) { +func (f StructField) PutInBatch(batch *leveldb.Batch, val any) (err error) { b, err := json.Marshal(val) if err != nil { return err diff --git a/pkg/statestore/leveldb/leveldb.go b/pkg/statestore/leveldb/leveldb.go index 08d757ce288..8ddd71d0215 100644 --- a/pkg/statestore/leveldb/leveldb.go +++ b/pkg/statestore/leveldb/leveldb.go @@ -76,7 +76,7 @@ func NewStateStore(path string, l log.Logger) (*Store, error) { // Get retrieves a value of the requested key. If no results are found, // storage.ErrNotFound will be returned. -func (s *Store) Get(key string, i interface{}) error { +func (s *Store) Get(key string, i any) error { data, err := s.db.Get([]byte(key), nil) if err != nil { if errors.Is(err, leveldb.ErrNotFound) { @@ -95,7 +95,7 @@ func (s *Store) Get(key string, i interface{}) error { // Put stores a value for an arbitrary key. BinaryMarshaler // interface method will be called on the provided value // with fallback to JSON serialization. 
-func (s *Store) Put(key string, i interface{}) (err error) { +func (s *Store) Put(key string, i any) (err error) { var bytes []byte if marshaler, ok := i.(encoding.BinaryMarshaler); ok { if bytes, err = marshaler.MarshalBinary(); err != nil { diff --git a/pkg/statestore/mock/store.go b/pkg/statestore/mock/store.go index a3d34542f2c..7c30fca12b5 100644 --- a/pkg/statestore/mock/store.go +++ b/pkg/statestore/mock/store.go @@ -28,7 +28,7 @@ func NewStateStore() storage.StateStorer { return s } -func (s *store) Get(key string, i interface{}) (err error) { +func (s *store) Get(key string, i any) (err error) { s.mtx.RLock() defer s.mtx.RUnlock() @@ -44,7 +44,7 @@ func (s *store) Get(key string, i interface{}) (err error) { return json.Unmarshal(data, i) } -func (s *store) Put(key string, i interface{}) (err error) { +func (s *store) Put(key string, i any) (err error) { s.mtx.Lock() defer s.mtx.Unlock() diff --git a/pkg/statestore/storeadapter/storeadapter.go b/pkg/statestore/storeadapter/storeadapter.go index 1deda6c6cd3..95e775bffb2 100644 --- a/pkg/statestore/storeadapter/storeadapter.go +++ b/pkg/statestore/storeadapter/storeadapter.go @@ -25,7 +25,7 @@ var _ storage.Item = (*proxyItem)(nil) type proxyItem struct { ns string key string - obj interface{} + obj any } // ID implements Item interface. @@ -91,7 +91,7 @@ func (pi proxyItem) String() string { } // newProxyItem creates a new proxyItem. -func newProxyItem(key string, obj interface{}) *proxyItem { +func newProxyItem(key string, obj any) *proxyItem { return &proxyItem{ns: stateStoreNamespace, key: key, obj: obj} } @@ -146,12 +146,12 @@ func (s *StateStorerAdapter) Close() error { } // Get implements StateStorer interface. -func (s *StateStorerAdapter) Get(key string, obj interface{}) (err error) { +func (s *StateStorerAdapter) Get(key string, obj any) (err error) { return s.storage.Get(newProxyItem(key, obj)) } // Put implements StateStorer interface. 
-func (s *StateStorerAdapter) Put(key string, obj interface{}) (err error) { +func (s *StateStorerAdapter) Put(key string, obj any) (err error) { return s.storage.Put(newProxyItem(key, obj)) } diff --git a/pkg/statestore/test/store.go b/pkg/statestore/test/store.go index 10bc9c15c83..48cb487edfe 100644 --- a/pkg/statestore/test/store.go +++ b/pkg/statestore/test/store.go @@ -159,7 +159,7 @@ func insertValues(t *testing.T, store storage.StateStorer, key1, key2 string, va func insert(t *testing.T, store storage.StateStorer, prefix string, count int) { t.Helper() - for i := 0; i < count; i++ { + for i := range count { k := prefix + fmt.Sprint(i) err := store.Put(k, i) diff --git a/pkg/storage/inmemstore/inmemstore.go b/pkg/storage/inmemstore/inmemstore.go index b0052602aa1..88076df2722 100644 --- a/pkg/storage/inmemstore/inmemstore.go +++ b/pkg/storage/inmemstore/inmemstore.go @@ -104,7 +104,7 @@ func (s *Store) Count(k storage.Key) (int, error) { defer s.mu.RUnlock() count := 0 - s.st.WalkPrefix(k.Namespace(), func(_ string, _ interface{}) bool { + s.st.WalkPrefix(k.Namespace(), func(_ string, _ any) bool { count++ return false }) @@ -116,7 +116,7 @@ func (s *Store) Iterate(q storage.Query, fn storage.IterateFn) error { return fmt.Errorf("failed iteration: %w", err) } - getNext := func(k string, v interface{}) (*storage.Result, error) { + getNext := func(k string, v any) (*storage.Result, error) { for _, filter := range q.Filters { if filter(idFromKey(k, q.Factory().Namespace()), v.([]byte)) { return nil, nil @@ -155,7 +155,7 @@ func (s *Store) Iterate(q storage.Query, fn storage.IterateFn) error { switch q.Order { case storage.KeyAscendingOrder: - s.st.WalkPrefix(prefix, func(k string, v interface{}) bool { + s.st.WalkPrefix(prefix, func(k string, v any) bool { if q.PrefixAtStart && !skipUntil { if k >= prefix+separator+q.Prefix { @@ -195,7 +195,7 @@ func (s *Store) Iterate(q storage.Query, fn storage.IterateFn) error { // For now, inmem implementation is not meant 
to work for large datasets, so first option // is chosen. results := make([]storage.Result, 0) - s.st.WalkPrefix(prefix, func(k string, v interface{}) bool { + s.st.WalkPrefix(prefix, func(k string, v any) bool { res, err := getNext(k, v) if err != nil { retErr = errors.Join(retErr, err) diff --git a/pkg/storage/migration/index_test.go b/pkg/storage/migration/index_test.go index ec35859d01f..2247c36ebef 100644 --- a/pkg/storage/migration/index_test.go +++ b/pkg/storage/migration/index_test.go @@ -326,7 +326,7 @@ func TestOptions(t *testing.T) { func populateStore(t *testing.T, s storage.Store, count int) { t.Helper() - for i := 0; i < count; i++ { + for i := range count { item := &obj{id: i, val: i} if err := s.Put(item); err != nil { t.Fatalf("populate store should succeed: %v", err) diff --git a/pkg/storage/migration/steps_chain_test.go b/pkg/storage/migration/steps_chain_test.go index e5a69917f65..327a41331d7 100644 --- a/pkg/storage/migration/steps_chain_test.go +++ b/pkg/storage/migration/steps_chain_test.go @@ -22,7 +22,7 @@ func TestNewStepsChain(t *testing.T) { stepsFn := make([]migration.StepFn, 0) // Create 10 step functions where each would remove single element, having value [0-10) - for i := 0; i < 10; i++ { + for i := range 10 { valForRemoval := i var stepFn migration.StepFn diff --git a/pkg/storage/statestore.go b/pkg/storage/statestore.go index 53520dea1be..b04d36d3551 100644 --- a/pkg/storage/statestore.go +++ b/pkg/storage/statestore.go @@ -16,10 +16,10 @@ type StateStorer interface { io.Closer // Get unmarshalls object with the given key into the given obj. - Get(key string, obj interface{}) error + Get(key string, obj any) error // Put inserts or updates the given obj stored under the given key. - Put(key string, obj interface{}) error + Put(key string, obj any) error // Delete removes object form the store stored under the given key. 
Delete(key string) error diff --git a/pkg/storage/storagetest/benchmark.go b/pkg/storage/storagetest/benchmark.go index fe2fe3dec12..8d051a06391 100644 --- a/pkg/storage/storagetest/benchmark.go +++ b/pkg/storage/storagetest/benchmark.go @@ -37,7 +37,7 @@ const ( func randomBytes(r *rand.Rand, n int) []byte { b := make([]byte, n) - for i := 0; i < n; i++ { + for i := range n { b[i] = ' ' + byte(r.Intn('~'-' '+1)) } return b @@ -104,7 +104,7 @@ func newStartAtEntryGenerator(start int, g entryGenerator) entryGenerator { func newSequentialKeys(size int, start int, keyFormat string) [][]byte { keys := make([][]byte, size) buffer := make([]byte, size*keyLen) - for i := 0; i < size; i++ { + for i := range size { begin, end := i*keyLen, (i+1)*keyLen key := buffer[begin:begin:end] _, _ = fmt.Fprintf(bytes.NewBuffer(key), keyFormat, start+i) @@ -117,7 +117,7 @@ func newRandomKeys(n int, format string) [][]byte { r := rand.New(rand.NewSource(time.Now().Unix())) keys := make([][]byte, n) buffer := make([]byte, n*keyLen) - for i := 0; i < n; i++ { + for i := range n { begin, end := i*keyLen, (i+1)*keyLen key := buffer[begin:begin:end] _, _ = fmt.Fprintf(bytes.NewBuffer(key), format, r.Intn(n)) @@ -129,7 +129,7 @@ func newRandomKeys(n int, format string) [][]byte { func newFullRandomKeys(size int, start int, format string) [][]byte { keys := newSequentialKeys(size, start, format) r := rand.New(rand.NewSource(time.Now().Unix())) - for i := 0; i < size; i++ { + for i := range size { j := r.Intn(size) keys[i], keys[j] = keys[j], keys[i] } diff --git a/pkg/storage/storagetest/storage.go b/pkg/storage/storagetest/storage.go index e40e6bb9f20..9ee41971b43 100644 --- a/pkg/storage/storagetest/storage.go +++ b/pkg/storage/storagetest/storage.go @@ -984,7 +984,7 @@ func BenchmarkWriteRandom(b *testing.B, db storage.Store) { start, step := 0, (b.N+parallelism)/parallelism n := step * parallelism g := newFullRandomEntryGenerator(0, n) - for i := 0; i < parallelism; i++ { + for range 
parallelism { gens = append(gens, newStartAtEntryGenerator(start, g)) start += step } diff --git a/pkg/storage/testing/chunk.go b/pkg/storage/testing/chunk.go index 6c625aceedd..512cf1746c6 100644 --- a/pkg/storage/testing/chunk.go +++ b/pkg/storage/testing/chunk.go @@ -76,7 +76,7 @@ func GenerateTestRandomInvalidChunk() swarm.Chunk { // Chunks by using GenerateTestRandomChunk function. func GenerateTestRandomChunks(count int) []swarm.Chunk { chunks := make([]swarm.Chunk, count) - for i := 0; i < count; i++ { + for i := range count { chunks[i] = GenerateTestRandomChunk() } return chunks diff --git a/pkg/storageincentives/events_test.go b/pkg/storageincentives/events_test.go index 0ea4547a145..f5a00d05c5b 100644 --- a/pkg/storageincentives/events_test.go +++ b/pkg/storageincentives/events_test.go @@ -41,7 +41,7 @@ func TestClose(t *testing.T) { ev.Close() - for i := 0; i < 3; i++ { + for range 3 { select { case <-done1: case <-done2: @@ -82,7 +82,7 @@ func TestPhaseCancel(t *testing.T) { ev.Publish(2) ev.Publish(3) - for i := 0; i < 2; i++ { + for range 2 { select { case <-done1: case <-done2: diff --git a/pkg/storageincentives/proof_test.go b/pkg/storageincentives/proof_test.go index de7e14f4e13..b0fcc8c8952 100644 --- a/pkg/storageincentives/proof_test.go +++ b/pkg/storageincentives/proof_test.go @@ -68,7 +68,7 @@ func TestMakeInclusionProofsRegression(t *testing.T) { // generate chunks that will be used as sample sampleChunks := make([]swarm.Chunk, 0, sampleSize) - for i := 0; i < sampleSize; i++ { + for i := range sampleSize { ch, err := cac.New(fmt.Appendf(nil, "Unstoppable data! 
Chunk #%d", i+1)) if err != nil { t.Fatal(err) diff --git a/pkg/storageincentives/redistributionstate_test.go b/pkg/storageincentives/redistributionstate_test.go index 063991c31d8..38c3ae8280d 100644 --- a/pkg/storageincentives/redistributionstate_test.go +++ b/pkg/storageincentives/redistributionstate_test.go @@ -199,7 +199,7 @@ func TestPurgeRoundData(t *testing.T) { hasRoundData := make([]bool, roundsCount) // Populate data at random rounds - for i := uint64(0); i < roundsCount; i++ { + for i := range uint64(roundsCount) { v := rand.Int()%2 == 0 hasRoundData[i] = v if v { @@ -210,7 +210,7 @@ func TestPurgeRoundData(t *testing.T) { // Run purge successively and assert that all data is purged up to // currentRound - purgeDataOlderThenXRounds - for i := uint64(0); i < roundsCount; i++ { + for i := range uint64(roundsCount) { state.SetCurrentEvent(0, i) state.purgeStaleRoundData() @@ -229,7 +229,7 @@ func TestPurgeRoundData(t *testing.T) { state.purgeStaleRoundData() // One more time assert that everything was purged - for i := uint64(0); i < roundsCount; i++ { + for i := range uint64(roundsCount) { assertHasDataAtRound(i, false) } } diff --git a/pkg/storageincentives/soc_mine_test.go b/pkg/storageincentives/soc_mine_test.go index 0e3a5090f47..4f3cd7812e8 100644 --- a/pkg/storageincentives/soc_mine_test.go +++ b/pkg/storageincentives/soc_mine_test.go @@ -46,7 +46,7 @@ func TestSocMine(t *testing.T) { // this constant is for a minimum reserve size of 2 million chunks with sample size of 16 // = 1.284401 * 10^71 = 1284401 + 66 0-s mstring := "1284401" - for i := 0; i < 66; i++ { + for range 66 { mstring = mstring + "0" } n, ok := new(big.Int).SetString(mstring, 10) @@ -111,7 +111,7 @@ func makeChunks(t *testing.T, signer crypto.Signer, sampleSize int, filterSOCAdd // the main loop terminating after sampleSize SOCs have been generated eg.Go(func() error { defer cancel() - for i := 0; i < sampleSize; i++ { + for i := range sampleSize { select { case sample[i] = 
<-sampleC: case <-ectx.Done(): @@ -125,7 +125,7 @@ func makeChunks(t *testing.T, signer crypto.Signer, sampleSize int, filterSOCAdd // loop to start mining workers count := 8 // number of parallel workers wg := sync.WaitGroup{} - for i := 0; i < count; i++ { + for i := range count { wg.Add(1) eg.Go(func() (err error) { offset := i * 4 diff --git a/pkg/storer/compact.go b/pkg/storer/compact.go index 20bf9e027e9..32a3f60dc67 100644 --- a/pkg/storer/compact.go +++ b/pkg/storer/compact.go @@ -51,7 +51,7 @@ func Compact(ctx context.Context, basePath string, opts *Options, validate bool) n := time.Now() - for shard := 0; shard < sharkyNoOfShards; shard++ { + for shard := range sharkyNoOfShards { select { case <-ctx.Done(): diff --git a/pkg/storer/compact_test.go b/pkg/storer/compact_test.go index 3fb58f22377..deb8b7412c4 100644 --- a/pkg/storer/compact_test.go +++ b/pkg/storer/compact_test.go @@ -37,14 +37,15 @@ func TestCompact(t *testing.T) { } st.StartReserveWorker(ctx, pullerMock.NewMockRateReporter(0), networkRadiusFunc(0)) - var chunks []swarm.Chunk batches := []*postage.Batch{postagetesting.MustNewBatch(), postagetesting.MustNewBatch(), postagetesting.MustNewBatch()} evictBatch := batches[1] putter := st.ReservePutter() - for b := 0; b < len(batches); b++ { - for i := uint64(0); i < 100; i++ { + chunks := make([]swarm.Chunk, 0, len(batches)*100) + + for b := range batches { + for range uint64(100) { ch := chunk.GenerateTestRandomChunk() ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batches[b].ID)) chunks = append(chunks, ch) @@ -81,7 +82,7 @@ func TestCompact(t *testing.T) { } putter = st.ReservePutter() - for i := uint64(0); i < 100; i++ { + for range uint64(100) { ch := chunk.GenerateTestRandomChunk() ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batches[0].ID)) chunks = append(chunks, ch) @@ -135,13 +136,14 @@ func TestCompactNoEvictions(t *testing.T) { } st.StartReserveWorker(ctx, pullerMock.NewMockRateReporter(0), networkRadiusFunc(0)) - var chunks 
[]swarm.Chunk batches := []*postage.Batch{postagetesting.MustNewBatch(), postagetesting.MustNewBatch(), postagetesting.MustNewBatch()} putter := st.ReservePutter() - for b := 0; b < len(batches); b++ { - for i := uint64(0); i < 100; i++ { + chunks := make([]swarm.Chunk, 0, len(batches)*100) + + for b := range batches { + for range uint64(100) { ch := chunk.GenerateTestRandomChunk() ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batches[b].ID)) chunks = append(chunks, ch) @@ -167,7 +169,7 @@ func TestCompactNoEvictions(t *testing.T) { } putter = st.ReservePutter() - for i := uint64(0); i < 100; i++ { + for range uint64(100) { ch := chunk.GenerateTestRandomChunk() ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batches[0].ID)) chunks = append(chunks, ch) diff --git a/pkg/storer/debug_test.go b/pkg/storer/debug_test.go index 1e8149ba795..8e68b8ac972 100644 --- a/pkg/storer/debug_test.go +++ b/pkg/storer/debug_test.go @@ -157,7 +157,7 @@ func testDebugInfo(t *testing.T, newStorer func() (*storer.DB, swarm.Address, er putter := lstore.ReservePutter() - for i := 0; i < 10; i++ { + for range 10 { chunk := chunktest.GenerateTestRandomChunkAt(t, addr, 0) err := putter.Put(context.Background(), chunk) if err != nil { diff --git a/pkg/storer/export_test.go b/pkg/storer/export_test.go index 83c0bd62657..b30b5996edd 100644 --- a/pkg/storer/export_test.go +++ b/pkg/storer/export_test.go @@ -22,11 +22,11 @@ func ReplaceSharkyShardLimit(val int) { } func (db *DB) WaitForBgCacheWorkers() (unblock func()) { - for i := 0; i < defaultBgCacheWorkers; i++ { + for range defaultBgCacheWorkers { db.cacheLimiter.sem <- struct{}{} } return func() { - for i := 0; i < defaultBgCacheWorkers; i++ { + for range defaultBgCacheWorkers { <-db.cacheLimiter.sem } } diff --git a/pkg/storer/internal/cache/cache_test.go b/pkg/storer/internal/cache/cache_test.go index 79536960d94..580263cb6ac 100644 --- a/pkg/storer/internal/cache/cache_test.go +++ b/pkg/storer/internal/cache/cache_test.go @@ 
-211,7 +211,7 @@ func TestCache(t *testing.T) { }) t.Run("not in chunkstore returns error", func(t *testing.T) { - for i := 0; i < 5; i++ { + for range 5 { unknownChunk := chunktest.GenerateTestRandomChunk() _, err := c.Getter(st).Get(context.TODO(), unknownChunk.Address()) if !errors.Is(err, storage.ErrNotFound) { @@ -223,7 +223,7 @@ func TestCache(t *testing.T) { t.Run("not in cache doesn't affect state", func(t *testing.T) { state := c.State(st.IndexStore()) - for i := 0; i < 5; i++ { + for range 5 { extraChunk := chunktest.GenerateTestRandomChunk() err := st.Run(context.Background(), func(s transaction.Store) error { return s.ChunkStore().Put(context.TODO(), extraChunk) diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index 67a0043ccf5..e90fa27b999 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -634,7 +634,7 @@ func (r *Reserve) Reset(ctx context.Context) error { // step 4: delete binItems err = r.st.Run(context.Background(), func(s transaction.Store) error { - for i := uint8(0); i < swarm.MaxBins; i++ { + for i := range swarm.MaxBins { err := s.IndexStore().Delete(&BinItem{Bin: i}) if err != nil { return err @@ -691,7 +691,7 @@ func (r *Reserve) LastBinIDs() ([]uint64, uint64, error) { ids := make([]uint64, swarm.MaxBins) - for bin := uint8(0); bin < swarm.MaxBins; bin++ { + for bin := range swarm.MaxBins { binItem := &BinItem{Bin: bin} err := r.st.IndexStore().Get(binItem) if err != nil { diff --git a/pkg/storer/internal/reserve/reserve_test.go b/pkg/storer/internal/reserve/reserve_test.go index 4633c140385..43c2252d736 100644 --- a/pkg/storer/internal/reserve/reserve_test.go +++ b/pkg/storer/internal/reserve/reserve_test.go @@ -48,7 +48,7 @@ func TestReserve(t *testing.T) { t.Fatal(err) } - for b := 0; b < 2; b++ { + for b := range 2 { for i := 1; i < 51; i++ { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b) err := r.Put(context.Background(), ch) @@ -101,7 
+101,7 @@ func TestReserveChunkType(t *testing.T) { storedChunksCA := 0 storedChunksSO := 0 - for i := 0; i < 100; i++ { + for range 100 { ch := chunk.GenerateTestRandomChunk() if rand.Intn(2) == 0 { storedChunksCA++ @@ -552,8 +552,8 @@ func TestEvict(t *testing.T) { t.Fatal(err) } - for i := 0; i < chunksPerBatch; i++ { - for b := 0; b < 3; b++ { + for range chunksPerBatch { + for b := range 3 { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b).WithStamp(postagetesting.MustNewBatchStamp(batches[b].ID)) chunks = append(chunks, ch) err := r.Put(context.Background(), ch) @@ -564,7 +564,7 @@ func TestEvict(t *testing.T) { } totalEvicted := 0 - for i := 0; i < 3; i++ { + for i := range 3 { evicted, err := r.EvictBatchBin(context.Background(), evictBatch.ID, math.MaxInt, uint8(i)) if err != nil { t.Fatal(err) @@ -623,9 +623,9 @@ func TestEvictSOC(t *testing.T) { batch := postagetesting.MustNewBatch() signer := getSigner(t) - var chunks []swarm.Chunk + chunks := make([]swarm.Chunk, 0, 10) - for i := 0; i < 10; i++ { + for i := range 10 { ch := soctesting.GenerateMockSocWithSigner(t, []byte{byte(i)}, signer).Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, uint64(i), uint64(i))) chunks = append(chunks, ch) err := r.Put(context.Background(), ch) @@ -694,8 +694,8 @@ func TestEvictMaxCount(t *testing.T) { batch := postagetesting.MustNewBatch() - for b := 0; b < 2; b++ { - for i := 0; i < 10; i++ { + for b := range 2 { + for range 10 { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b).WithStamp(postagetesting.MustNewBatchStamp(batch.ID)) chunks = append(chunks, ch) err := r.Put(context.Background(), ch) @@ -750,8 +750,8 @@ func TestIterate(t *testing.T) { t.Fatal(err) } - for b := 0; b < 3; b++ { - for i := 0; i < 10; i++ { + for b := range 3 { + for range 10 { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b) err := r.Put(context.Background(), ch) if err != nil { @@ -868,7 +868,7 @@ func TestReset(t *testing.T) { total = bins * chunksPerBin ) - for b := 
0; b < bins; b++ { + for b := range bins { for i := 1; i <= chunksPerBin; i++ { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b) err := r.Put(context.Background(), ch) @@ -995,7 +995,7 @@ func TestEvictRemovesPinnedContent(t *testing.T) { batch := postagetesting.MustNewBatch() chunks := make([]swarm.Chunk, numChunks) - for i := 0; i < numChunks; i++ { + for i := range numChunks { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewBatchStamp(batch.ID)) chunks[i] = ch diff --git a/pkg/storer/migration/refCntSize_test.go b/pkg/storer/migration/refCntSize_test.go index bfbc8c687e7..d5e5424283e 100644 --- a/pkg/storer/migration/refCntSize_test.go +++ b/pkg/storer/migration/refCntSize_test.go @@ -24,8 +24,8 @@ func Test_RefCntSize(t *testing.T) { store := inmemstore.New() // simulate old cacheEntryItem with some random bytes. - var oldItems []*localmigration.OldRetrievalIndexItem - for i := 0; i < 10; i++ { + oldItems := make([]*localmigration.OldRetrievalIndexItem, 0, 10) + for range 10 { entry := &localmigration.OldRetrievalIndexItem{ Address: swarm.RandAddress(t), Timestamp: uint64(rand.Int()), diff --git a/pkg/storer/migration/reserveRepair.go b/pkg/storer/migration/reserveRepair.go index 938e6b04b28..b8378d92550 100644 --- a/pkg/storer/migration/reserveRepair.go +++ b/pkg/storer/migration/reserveRepair.go @@ -84,7 +84,7 @@ func ReserveRepairer( // STEP 1 err = st.Run(context.Background(), func(s transaction.Store) error { - for i := uint8(0); i < swarm.MaxBins; i++ { + for i := range swarm.MaxBins { err := s.IndexStore().Delete(&reserve.BinItem{Bin: i}) if err != nil { return err diff --git a/pkg/storer/migration/reserveRepair_test.go b/pkg/storer/migration/reserveRepair_test.go index 884664c8787..b60f51769eb 100644 --- a/pkg/storer/migration/reserveRepair_test.go +++ b/pkg/storer/migration/reserveRepair_test.go @@ -33,15 +33,15 @@ func TestReserveRepair(t *testing.T) { var chunksPO = make([][]swarm.Chunk, 5) var chunksPerPO 
uint64 = 2 - for i := uint8(0); i < swarm.MaxBins; i++ { + for i := range swarm.MaxBins { err := store.Run(context.Background(), func(s transaction.Store) error { return s.IndexStore().Put(&reserve.BinItem{Bin: i, BinID: 10}) }) assert.NoError(t, err) } - for b := 0; b < 5; b++ { - for i := uint64(0); i < chunksPerPO; i++ { + for b := range 5 { + for range chunksPerPO { ch := chunktest.GenerateTestRandomChunkAt(t, baseAddr, b) stampHash, err := ch.Stamp().Hash() if err != nil { @@ -115,7 +115,7 @@ func TestReserveRepair(t *testing.T) { ) assert.NoError(t, err) - for b := 0; b < 5; b++ { + for b := range 5 { if b < 2 { if _, found := binIDs[uint8(b)]; found { t.Fatalf("bin %d should not have any binIDs", b) diff --git a/pkg/storer/migration/step_02_test.go b/pkg/storer/migration/step_02_test.go index 32cfa9abe4d..c1c741ac2fa 100644 --- a/pkg/storer/migration/step_02_test.go +++ b/pkg/storer/migration/step_02_test.go @@ -54,8 +54,8 @@ func Test_Step_02(t *testing.T) { store := internal.NewInmemStorage() // simulate old cacheEntryItem with some random bytes. 
- var addrs []*testEntry - for i := 0; i < 10; i++ { + addrs := make([]*testEntry, 0, 10) + for range 10 { entry := &testEntry{address: swarm.RandAddress(t)} addrs = append(addrs, entry) err := store.Run(context.Background(), func(s transaction.Store) error { diff --git a/pkg/storer/migration/step_04_test.go b/pkg/storer/migration/step_04_test.go index 758ddc7987f..fcf54225af4 100644 --- a/pkg/storer/migration/step_04_test.go +++ b/pkg/storer/migration/step_04_test.go @@ -84,7 +84,7 @@ func Test_Step_04(t *testing.T) { _, err = f.Read(buf) assert.NoError(t, err) - for i := 0; i < 10; i++ { + for i := range 10 { if i < 2 { // if the chunk is deleted, the bit is set to 1 assert.Greater(t, buf[i/8]&(1<<(i%8)), byte(0)) diff --git a/pkg/storer/reserve.go b/pkg/storer/reserve.go index 2ac9d318623..74a3b4286ce 100644 --- a/pkg/storer/reserve.go +++ b/pkg/storer/reserve.go @@ -466,9 +466,7 @@ func (db *DB) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan done := make(chan struct{}) errC := make(chan error, 1) - db.inFlight.Add(1) - go func() { - defer db.inFlight.Done() + db.inFlight.Go(func() { trigger, unsub := db.reserveBinEvents.Subscribe(string(bin)) defer unsub() @@ -507,7 +505,7 @@ func (db *DB) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan return } } - }() + }) var doneOnce sync.Once return out, func() { @@ -555,7 +553,7 @@ func neighborhoodPrefixes(base swarm.Address, radius int, suffixLength int) []sw bitCombinationsCount := int(math.Pow(2, float64(suffixLength))) bitSuffixes := make([]uint8, bitCombinationsCount) - for i := 0; i < bitCombinationsCount; i++ { + for i := range bitCombinationsCount { bitSuffixes[i] = uint8(i) } diff --git a/pkg/storer/reserve_test.go b/pkg/storer/reserve_test.go index c354f92cdbe..3126a45015c 100644 --- a/pkg/storer/reserve_test.go +++ b/pkg/storer/reserve_test.go @@ -198,8 +198,8 @@ func TestEvictBatch(t *testing.T) { putter := st.ReservePutter() - for b := 0; b < 3; b++ { - for i := 
uint64(0); i < chunksPerPO; i++ { + for b := range 3 { + for range chunksPerPO { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b) ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batches[b].ID)) chunks = append(chunks, ch) @@ -290,8 +290,8 @@ func TestUnreserveCap(t *testing.T) { c, unsub := storer.Events().Subscribe("reserveUnreserved") defer unsub() - for b := 0; b < 5; b++ { - for i := uint64(0); i < chunksPerPO; i++ { + for b := range 5 { + for range chunksPerPO { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b) ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batch.ID)) chunksPO[b] = append(chunksPO[b], ch) @@ -438,8 +438,8 @@ func TestRadiusManager(t *testing.T) { putter := storer.ReservePutter() - for i := 0; i < 4; i++ { - for j := 0; j < 10; j++ { + for i := range 4 { + for range 10 { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, i).WithStamp(postagetesting.MustNewBatchStamp(batch.ID)) err := putter.Put(context.Background(), ch) if err != nil { @@ -480,8 +480,8 @@ func TestSubscribeBin(t *testing.T) { putter = storer.ReservePutter() ) - for j := 0; j < 2; j++ { - for i := uint64(0); i < chunksPerPO; i++ { + for j := range 2 { + for range chunksPerPO { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, j) chunks = append(chunks, ch) err := putter.Put(context.Background(), ch) @@ -597,8 +597,8 @@ func TestSubscribeBinTrigger(t *testing.T) { ) putter := storer.ReservePutter() - for j := 0; j < 2; j++ { - for i := uint64(0); i < chunksPerPO; i++ { + for j := range 2 { + for range chunksPerPO { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, j) chunks = append(chunks, ch) err := putter.Put(context.Background(), ch) @@ -694,7 +694,7 @@ func TestNeighborhoodStats(t *testing.T) { putChunks := func(addr swarm.Address, startingRadius int, st *storer.DB) { putter := st.ReservePutter() - for i := 0; i < chunkCountPerPO; i++ { + for range chunkCountPerPO { ch := chunk.GenerateValidRandomChunkAt(t, addr, startingRadius) err := 
putter.Put(context.Background(), ch) if err != nil { diff --git a/pkg/storer/sample_test.go b/pkg/storer/sample_test.go index fd39882d367..49745c4b24f 100644 --- a/pkg/storer/sample_test.go +++ b/pkg/storer/sample_test.go @@ -25,8 +25,8 @@ func TestReserveSampler(t *testing.T) { randChunks := func(baseAddr swarm.Address, timeVar uint64) []swarm.Chunk { var chs []swarm.Chunk - for po := 0; po < maxPO; po++ { - for i := 0; i < chunkCountPerPO; i++ { + for po := range maxPO { + for range chunkCountPerPO { ch := chunk.GenerateValidRandomChunkAt(t, baseAddr, po).WithBatch(3, 2, false) if rand.Intn(2) == 0 { // 50% chance to wrap CAC into SOC ch = chunk.GenerateTestRandomSoChunk(t, ch) @@ -155,7 +155,7 @@ func TestReserveSamplerSisterNeighborhood(t *testing.T) { randChunks := func(baseAddr swarm.Address, startingRadius int, timeVar uint64) []swarm.Chunk { var chs []swarm.Chunk for po := startingRadius; po < maxPO; po++ { - for i := 0; i < chunkCountPerPO; i++ { + for range chunkCountPerPO { ch := chunk.GenerateValidRandomChunkAt(t, baseAddr, po).WithBatch(3, 2, false) if rand.Intn(2) == 0 { // 50% chance to wrap CAC into SOC ch = chunk.GenerateTestRandomSoChunk(t, ch) diff --git a/pkg/storer/subscribe_push.go b/pkg/storer/subscribe_push.go index bd34956b387..237b155b66c 100644 --- a/pkg/storer/subscribe_push.go +++ b/pkg/storer/subscribe_push.go @@ -23,9 +23,7 @@ func (db *DB) SubscribePush(ctx context.Context) (<-chan swarm.Chunk, func()) { stopChanOnce sync.Once ) - db.subscriptionsWG.Add(1) - go func() { - defer db.subscriptionsWG.Done() + db.subscriptionsWG.Go(func() { trigger, unsub := db.events.Subscribe(subscribePushEventKey) defer unsub() @@ -80,7 +78,7 @@ func (db *DB) SubscribePush(ctx context.Context) (<-chan swarm.Chunk, func()) { // wait for the next event } } - }() + }) stop := func() { stopChanOnce.Do(func() { diff --git a/pkg/storer/uploadstore_test.go b/pkg/storer/uploadstore_test.go index 095904ade7d..fc53e5281d6 100644 --- 
a/pkg/storer/uploadstore_test.go +++ b/pkg/storer/uploadstore_test.go @@ -196,7 +196,7 @@ func testUploadStore(t *testing.T, newStorer func() (*storer.DB, error)) { chunks := chunktesting.GenerateTestRandomChunks(10) for _, ch := range chunks { - for i := 0; i < 2; i++ { + for range 2 { err := session.Put(context.TODO(), ch) if err != nil { t.Fatalf("session.Put(...): unexpected error: %v", err) @@ -273,7 +273,7 @@ func testListDeleteSessions(t *testing.T, newStorer func() (*storer.DB, error)) t.Fatal(err) } - for i := 0; i < 10; i++ { + for range 10 { _, err := lstore.NewSession() if err != nil { t.Fatalf("NewSession(): unexpected error: %v", err) diff --git a/pkg/storer/validate.go b/pkg/storer/validate.go index 0220c4fb899..01f6904d02c 100644 --- a/pkg/storer/validate.go +++ b/pkg/storer/validate.go @@ -153,15 +153,13 @@ func validateWork(logger log.Logger, store storage.Store, readFn func(context.Co var wg sync.WaitGroup - for i := 0; i < 8; i++ { - wg.Add(1) - go func() { - defer wg.Done() + for range 8 { + wg.Go(func() { buf := make([]byte, swarm.SocMaxChunkSize) for item := range iteratateItemsC { validChunk(item, buf[:item.Location.Length]) } - }() + }) } count := 0 @@ -331,10 +329,8 @@ func (p *PinIntegrity) Check(ctx context.Context, logger log.Logger, pin string, iteratateItemsC := make(chan *chunkstore.RetrievalIndexItem) - for i := 0; i < 8; i++ { - wg.Add(1) - go func() { - defer wg.Done() + for range 8 { + wg.Go(func() { buf := make([]byte, swarm.SocMaxChunkSize) for item := range iteratateItemsC { if ctx.Err() != nil { @@ -344,7 +340,7 @@ func (p *PinIntegrity) Check(ctx context.Context, logger log.Logger, pin string, invalid.Add(1) } } - }() + }) } var count, micrs int64 diff --git a/pkg/swarm/proximity.go b/pkg/swarm/proximity.go index d68b9cd4795..43fefa81de2 100644 --- a/pkg/swarm/proximity.go +++ b/pkg/swarm/proximity.go @@ -28,7 +28,7 @@ func Proximity(one, other []byte) (ret uint8) { var m uint8 = 8 for i := uint8(0); i < b; i++ { oxo := 
one[i] ^ other[i] - for j := uint8(0); j < m; j++ { + for j := range m { if (oxo>>(7-j))&0x01 != 0 { return i*8 + j } @@ -48,7 +48,7 @@ func ExtendedProximity(one, other []byte) (ret uint8) { var m uint8 = 8 for i := uint8(0); i < b; i++ { oxo := one[i] ^ other[i] - for j := uint8(0); j < m; j++ { + for j := range m { if (oxo>>(7-j))&0x01 != 0 { return i*8 + j } diff --git a/pkg/swarm/test_helpers.go b/pkg/swarm/test_helpers.go index b043e1bb17d..e0d614ae0b1 100644 --- a/pkg/swarm/test_helpers.go +++ b/pkg/swarm/test_helpers.go @@ -55,7 +55,7 @@ func RandAddresses(tb testing.TB, count int) []Address { tb.Helper() result := make([]Address, count) - for i := 0; i < count; i++ { + for i := range count { result[i] = RandAddress(tb) } return result diff --git a/pkg/swarm/test_helpers_test.go b/pkg/swarm/test_helpers_test.go index e841fd5c6c5..11d88842681 100644 --- a/pkg/swarm/test_helpers_test.go +++ b/pkg/swarm/test_helpers_test.go @@ -29,7 +29,7 @@ func Test_RandAddressAt(t *testing.T) { hw0 := []byte{b0[0], b0[1], 0, 0} // highest words of base address hw0int := binary.BigEndian.Uint32(hw0) - for bitsInCommon := 0; bitsInCommon < 30; bitsInCommon++ { + for bitsInCommon := range 30 { addr := swarm.RandAddressAt(t, base, bitsInCommon) assertNotZeroAddress(t, addr) @@ -39,7 +39,7 @@ func Test_RandAddressAt(t *testing.T) { //bb0 is the bit mask to AND with hw0 and hw1 bb0 := uint32(0) - for i := 0; i < bitsInCommon; i++ { + for i := range bitsInCommon { bb0 |= (1 << (31 - i)) } @@ -62,7 +62,7 @@ func Test_RandAddresses(t *testing.T) { if got := len(addrs); got != count { t.Fatalf("expected %d, got %d", count, got) } - for i := 0; i < count; i++ { + for i := range count { assertNotZeroAddress(t, addrs[i]) } } diff --git a/pkg/swarm/utilities_test.go b/pkg/swarm/utilities_test.go index d3be0cac487..9fbd270513a 100644 --- a/pkg/swarm/utilities_test.go +++ b/pkg/swarm/utilities_test.go @@ -204,7 +204,7 @@ func Test_FindStampWithBatchID(t *testing.T) { func 
cloneAddresses(addrs []swarm.Address) []swarm.Address { result := make([]swarm.Address, len(addrs)) - for i := 0; i < len(addrs); i++ { + for i := range addrs { result[i] = addrs[i].Clone() } return result diff --git a/pkg/topology/kademlia/binprefix.go b/pkg/topology/kademlia/binprefix.go index 94e2550ea2d..4b1b119cffe 100644 --- a/pkg/topology/kademlia/binprefix.go +++ b/pkg/topology/kademlia/binprefix.go @@ -17,7 +17,7 @@ func generateCommonBinPrefixes(base swarm.Address, suffixLength int) [][]swarm.A bitCombinationsCount := int(math.Pow(2, float64(suffixLength))) bitSuffixes := make([]uint8, bitCombinationsCount) - for i := 0; i < bitCombinationsCount; i++ { + for i := range bitCombinationsCount { bitSuffixes[i] = uint8(i) } diff --git a/pkg/topology/kademlia/internal/metrics/metrics.go b/pkg/topology/kademlia/internal/metrics/metrics.go index 272dfea6d3f..706271ebcaa 100644 --- a/pkg/topology/kademlia/internal/metrics/metrics.go +++ b/pkg/topology/kademlia/internal/metrics/metrics.go @@ -297,7 +297,7 @@ func (c *Collector) Snapshot(t time.Time, addresses ...swarm.Address) map[string } if len(addresses) == 0 { - c.counters.Range(func(key, val interface{}) bool { + c.counters.Range(func(key, val any) bool { cs := val.(*Counters) snapshot[cs.peerAddress.ByteString()] = cs.snapshot(t) return true @@ -381,8 +381,8 @@ func (c *Collector) Inspect(addr swarm.Address) *Snapshot { // Flush sync the dirty in memory counters for all peers by flushing their // values to the underlying storage. func (c *Collector) Flush() error { - counters := make(map[string]interface{}) - c.counters.Range(func(key, val interface{}) bool { + counters := make(map[string]any) + c.counters.Range(func(key, val any) bool { cs := val.(*Counters) counters[cs.peerAddress.ByteString()] = val return true @@ -396,7 +396,7 @@ func (c *Collector) Flush() error { // Finalize tries to log out all ongoing peer sessions. 
func (c *Collector) Finalize(t time.Time, remove bool) error { - c.counters.Range(func(_, val interface{}) bool { + c.counters.Range(func(_, val any) bool { cs := val.(*Counters) PeerLogOut(t)(cs) return true @@ -407,7 +407,7 @@ func (c *Collector) Finalize(t time.Time, remove bool) error { } if remove { - c.counters.Range(func(_, val interface{}) bool { + c.counters.Range(func(_, val any) bool { cs := val.(*Counters) c.counters.Delete(cs.peerAddress.ByteString()) return true diff --git a/pkg/topology/kademlia/kademlia.go b/pkg/topology/kademlia/kademlia.go index 78d5feb0c40..e045c9658fe 100644 --- a/pkg/topology/kademlia/kademlia.go +++ b/pkg/topology/kademlia/kademlia.go @@ -533,9 +533,7 @@ func (k *Kad) manage() { balanceChan := make(chan *peerConnInfo) go k.connectionAttemptsHandler(ctx, &wg, neighbourhoodChan, balanceChan) - k.wg.Add(1) - go func() { - defer k.wg.Done() + k.wg.Go(func() { for { select { case <-k.halt: @@ -546,11 +544,9 @@ func (k *Kad) manage() { k.opt.PruneFunc(k.neighborhoodDepth()) } } - }() + }) - k.wg.Add(1) - go func() { - defer k.wg.Done() + k.wg.Go(func() { for { select { case <-k.halt: @@ -569,12 +565,10 @@ func (k *Kad) manage() { } } } - }() + }) // tell each neighbor about other neighbors periodically - k.wg.Add(1) - go func() { - defer k.wg.Done() + k.wg.Go(func() { for { select { case <-k.halt: @@ -597,7 +591,7 @@ func (k *Kad) manage() { } } } - }() + }) for { select { @@ -1041,7 +1035,7 @@ func (k *Kad) Announce(ctx context.Context, peer swarm.Address, fullnode bool) e isNeighbor := swarm.Proximity(peer.Bytes(), k.base.Bytes()) >= depth outer: - for bin := uint8(0); bin < swarm.MaxBins; bin++ { + for bin := range swarm.MaxBins { var ( connectedPeers []swarm.Address @@ -1589,7 +1583,7 @@ func randomSubset(addrs []swarm.Address, count int) ([]swarm.Address, error) { return addrs, nil } - for i := 0; i < len(addrs); i++ { + for i := range addrs { b, err := random.Int(random.Reader, big.NewInt(int64(len(addrs)))) if err != nil { 
return nil, err diff --git a/pkg/topology/kademlia/kademlia_test.go b/pkg/topology/kademlia/kademlia_test.go index bca195cdb10..441ac24ec49 100644 --- a/pkg/topology/kademlia/kademlia_test.go +++ b/pkg/topology/kademlia/kademlia_test.go @@ -69,7 +69,7 @@ func TestNeighborhoodDepth(t *testing.T) { testutil.CleanupCloser(t, kad) // add 2 peers in bin 8 - for i := 0; i < 2; i++ { + for range 2 { addr := swarm.RandAddressAt(t, base, 8) addOne(t, signer, kad, ab, addr) @@ -79,9 +79,9 @@ func TestNeighborhoodDepth(t *testing.T) { // depth is 0 kDepth(t, kad, 0) - var shallowPeers []swarm.Address + shallowPeers := make([]swarm.Address, 0, 2) // add two first peers (po0,po1) - for i := 0; i < 2; i++ { + for i := range 2 { addr := swarm.RandAddressAt(t, base, i) addOne(t, signer, kad, ab, addr) shallowPeers = append(shallowPeers, addr) @@ -114,8 +114,8 @@ func TestNeighborhoodDepth(t *testing.T) { // now add peers from bin 0 and expect the depth // to shift. the depth will be that of the shallowest // unsaturated bin. 
- for i := 0; i < 7; i++ { - for j := 0; j < 3; j++ { + for i := range 7 { + for range 3 { addr := swarm.RandAddressAt(t, base, i) addOne(t, signer, kad, ab, addr) waitConn(t, &conns) @@ -148,7 +148,7 @@ func TestNeighborhoodDepth(t *testing.T) { kDepth(t, kad, 7) // now fill bin 7 so that it is saturated, expect depth 8 - for i := 0; i < 3; i++ { + for range 3 { addr := swarm.RandAddressAt(t, base, 7) addOne(t, signer, kad, ab, addr) waitConn(t, &conns) @@ -164,7 +164,7 @@ func TestNeighborhoodDepth(t *testing.T) { var addrs []swarm.Address // fill the rest up to the bin before last and check that everything works at the edges for i := 9; i < int(swarm.MaxBins); i++ { - for j := 0; j < 4; j++ { + for range 4 { addr := swarm.RandAddressAt(t, base, i) addOne(t, signer, kad, ab, addr) waitConn(t, &conns) @@ -174,7 +174,7 @@ func TestNeighborhoodDepth(t *testing.T) { } // add a whole bunch of peers in the last bin, expect depth to stay at 31 - for i := 0; i < 15; i++ { + for range 15 { addr = swarm.RandAddressAt(t, base, int(swarm.MaxPO)) addOne(t, signer, kad, ab, addr) } @@ -187,7 +187,7 @@ func TestNeighborhoodDepth(t *testing.T) { kDepth(t, kad, 30) // empty bin 9 and expect depth 9 - for i := 0; i < 4; i++ { + for i := range 4 { removeOne(kad, addrs[i]) } kDepth(t, kad, 9) @@ -216,7 +216,7 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) { kad.SetStorageRadius(0) // add 2 peers in bin 8 - for i := 0; i < 2; i++ { + for range 2 { addr := swarm.RandAddressAt(t, base, 8) addOne(t, signer, kad, ab, addr) kad.Reachable(addr, p2p.ReachabilityStatusPublic) @@ -227,9 +227,9 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) { // depth is 0 kDepth(t, kad, 0) - var shallowPeers []swarm.Address + shallowPeers := make([]swarm.Address, 0, 2) // add two first peers (po0,po1) - for i := 0; i < 2; i++ { + for i := range 2 { addr := swarm.RandAddressAt(t, base, i) addOne(t, signer, kad, ab, addr) kad.Reachable(addr, p2p.ReachabilityStatusPublic) @@ -262,8 
+262,8 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) { // now add peers from bin 0 and expect the depth // to shift. the depth will be that of the shallowest // unsaturated bin. - for i := 0; i < 7; i++ { - for j := 0; j < 3; j++ { + for i := range 7 { + for range 3 { addr := swarm.RandAddressAt(t, base, i) addOne(t, signer, kad, ab, addr) kad.Reachable(addr, p2p.ReachabilityStatusPublic) @@ -292,7 +292,7 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) { kDepth(t, kad, 7) // now fill bin 7 so that it is saturated, expect depth 8 - for i := 0; i < 3; i++ { + for range 3 { addr := swarm.RandAddressAt(t, base, 7) addOne(t, signer, kad, ab, addr) kad.Reachable(addr, p2p.ReachabilityStatusPublic) @@ -310,7 +310,7 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) { var addrs []swarm.Address // fill the rest up to the bin before last and check that everything works at the edges for i := 9; i < int(swarm.MaxBins); i++ { - for j := 0; j < 4; j++ { + for range 4 { addr := swarm.RandAddressAt(t, base, i) addOne(t, signer, kad, ab, addr) kad.Reachable(addr, p2p.ReachabilityStatusPublic) @@ -321,7 +321,7 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) { } // add a whole bunch of peers in the last bin, expect depth to stay at 31 - for i := 0; i < 15; i++ { + for range 15 { addr = swarm.RandAddressAt(t, base, int(swarm.MaxPO)) addOne(t, signer, kad, ab, addr) kad.Reachable(addr, p2p.ReachabilityStatusPublic) @@ -335,7 +335,7 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) { kDepth(t, kad, 30) // empty bin 9 and expect depth 9 - for i := 0; i < 4; i++ { + for i := range 4 { removeOne(kad, addrs[i]) } kDepth(t, kad, 9) @@ -365,7 +365,7 @@ func TestManage(t *testing.T) { kad.SetStorageRadius(0) // first, we add peers to bin 0 - for i := 0; i < saturation; i++ { + for range saturation { addr := swarm.RandAddressAt(t, base, 0) addOne(t, signer, kad, ab, addr) } @@ -373,7 +373,7 @@ func TestManage(t *testing.T) { 
waitCounter(t, &conns, int32(saturation)) // next, we add peers to the next bin - for i := 0; i < saturation; i++ { + for range saturation { addr := swarm.RandAddressAt(t, base, 1) addOne(t, signer, kad, ab, addr) } @@ -383,7 +383,7 @@ func TestManage(t *testing.T) { kad.SetStorageRadius(1) // here, we attempt to add to bin 0, but bin is saturated, so no new peers should connect to it - for i := 0; i < saturation; i++ { + for range saturation { addr := swarm.RandAddressAt(t, base, 0) addOne(t, signer, kad, ab, addr) } @@ -466,8 +466,8 @@ func TestBinSaturation(t *testing.T) { // add two peers in a few bins to generate some depth >= 0, this will // make the next iteration result in binSaturated==true, causing no new // connections to be made - for i := 0; i < 5; i++ { - for j := 0; j < 2; j++ { + for i := range 5 { + for range 2 { addr := swarm.RandAddressAt(t, base, i) addOne(t, signer, kad, ab, addr) } @@ -479,7 +479,7 @@ func TestBinSaturation(t *testing.T) { // add one more peer in each bin shallower than depth and // expect no connections due to saturation. 
if we add a peer within // depth, the short circuit will be hit and we will connect to the peer - for i := 0; i < 3; i++ { + for i := range 3 { addr := swarm.RandAddressAt(t, base, i) addOne(t, signer, kad, ab, addr) } @@ -514,8 +514,8 @@ func TestOversaturation(t *testing.T) { testutil.CleanupCloser(t, kad) // Add maximum accepted number of peers up until bin 5 without problems - for i := 0; i < 6; i++ { - for j := 0; j < kademlia.DefaultOverSaturationPeers; j++ { + for i := range 6 { + for range kademlia.DefaultOverSaturationPeers { addr := swarm.RandAddressAt(t, base, i) // if error is not nil as specified, connectOne goes fatal connectOne(t, signer, kad, ab, addr, nil) @@ -527,9 +527,9 @@ func TestOversaturation(t *testing.T) { // see depth is 5 kDepth(t, kad, 5) - for k := 0; k < 5; k++ { + for k := range 5 { // no further connections can be made - for l := 0; l < 3; l++ { + for range 3 { addr := swarm.RandAddressAt(t, base, k) // if error is not as specified, connectOne goes fatal connectOne(t, signer, kad, ab, addr, topology.ErrOversaturated) @@ -566,8 +566,8 @@ func TestOversaturationBootnode(t *testing.T) { testutil.CleanupCloser(t, kad) // Add maximum accepted number of peers up until bin 5 without problems - for i := 0; i < 6; i++ { - for j := 0; j < overSaturationPeers; j++ { + for i := range 6 { + for range overSaturationPeers { addr := swarm.RandAddressAt(t, base, i) // if error is not nil as specified, connectOne goes fatal connectOne(t, signer, kad, ab, addr, nil) @@ -579,9 +579,9 @@ func TestOversaturationBootnode(t *testing.T) { // see depth is 5 kDepth(t, kad, 5) - for k := 0; k < 5; k++ { + for k := range 5 { // further connections should succeed outside of depth - for l := 0; l < 3; l++ { + for range 3 { addr := swarm.RandAddressAt(t, base, k) // if error is not as specified, connectOne goes fatal connectOne(t, signer, kad, ab, addr, nil) @@ -595,7 +595,7 @@ func TestOversaturationBootnode(t *testing.T) { } // see we can still add / not 
limiting more peers in neighborhood depth - for m := 0; m < 12; m++ { + for range 12 { addr := swarm.RandAddressAt(t, base, 5) // if error is not nil as specified, connectOne goes fatal connectOne(t, signer, kad, ab, addr, nil) @@ -624,8 +624,8 @@ func TestBootnodeMaxConnections(t *testing.T) { testutil.CleanupCloser(t, kad) // Add maximum accepted number of peers up until bin 5 without problems - for i := 0; i < 6; i++ { - for j := 0; j < bootnodeOverSaturationPeers; j++ { + for i := range 6 { + for range bootnodeOverSaturationPeers { addr := swarm.RandAddressAt(t, base, i) // if error is not nil as specified, connectOne goes fatal connectOne(t, signer, kad, ab, addr, nil) @@ -640,9 +640,9 @@ func TestBootnodeMaxConnections(t *testing.T) { depth := 5 outSideDepthPeers := 5 - for k := 0; k < depth; k++ { + for k := range depth { // further connections should succeed outside of depth - for l := 0; l < outSideDepthPeers; l++ { + for range outSideDepthPeers { addr := swarm.RandAddressAt(t, base, k) // if error is not as specified, connectOne goes fatal connectOne(t, signer, kad, ab, addr, nil) @@ -1109,7 +1109,7 @@ func TestKademlia_SubscribeTopologyChange(t *testing.T) { c2, u2 := kad.SubscribeTopologyChange() defer u2() - for i := 0; i < 4; i++ { + for i := range 4 { addr := swarm.RandAddressAt(t, base, i) addOne(t, sg, kad, ab, addr) } @@ -1129,14 +1129,14 @@ func TestKademlia_SubscribeTopologyChange(t *testing.T) { c, u := kad.SubscribeTopologyChange() defer u() - for i := 0; i < 4; i++ { + for i := range 4 { addr := swarm.RandAddressAt(t, base, i) addOne(t, sg, kad, ab, addr) } testSignal(t, c) - for i := 0; i < 4; i++ { + for i := range 4 { addr := swarm.RandAddressAt(t, base, i) addOne(t, sg, kad, ab, addr) } @@ -1210,9 +1210,9 @@ func getBinPopulation(bins *topology.KadBins, po uint8) uint64 { func TestStart(t *testing.T) { t.Parallel() - var bootnodes []ma.Multiaddr - var bootnodesOverlays []swarm.Address - for i := 0; i < 10; i++ { + bootnodes := 
make([]ma.Multiaddr, 0, 10) + bootnodesOverlays := make([]swarm.Address, 0, 10) + for range 10 { overlay := swarm.RandAddress(t) multiaddr, err := ma.NewMultiaddr(underlayBase + overlay.String()) @@ -1231,7 +1231,7 @@ func TestStart(t *testing.T) { var conns, failedConns int32 // how many connect calls were made to the p2p mock _, kad, ab, _, signer := newTestKademlia(t, &conns, &failedConns, kademlia.Options{Bootnodes: bootnodes}) - for i := 0; i < 3; i++ { + for range 3 { peer := swarm.RandAddress(t) multiaddr, err := ma.NewMultiaddr(underlayBase + peer.String()) if err != nil { @@ -1324,7 +1324,7 @@ func TestOutofDepthPrune(t *testing.T) { testutil.CleanupCloser(t, kad) // bin 0,1 balanced, rest not - for i := 0; i < 6; i++ { + for i := range 6 { var peers []swarm.Address if i < 2 { peers = mineBin(t, base, i, 20, true) @@ -1350,7 +1350,7 @@ func TestOutofDepthPrune(t *testing.T) { // check that no pruning has happened bins := binSizes(kad) - for i := 0; i < 6; i++ { + for i := range 6 { if bins[i] <= overSaturationPeers { t.Fatalf("bin %d, got %d, want more than %d", i, bins[i], overSaturationPeers) } @@ -1375,7 +1375,7 @@ func TestOutofDepthPrune(t *testing.T) { // check bins have been pruned bins = binSizes(kad) - for i := uint8(0); i < 5; i++ { + for i := range uint8(5) { if bins[i] != overSaturationPeers { t.Fatalf("bin %d, got %d, want %d", i, bins[i], overSaturationPeers) } @@ -1426,7 +1426,7 @@ func TestPruneExcludeOps(t *testing.T) { testutil.CleanupCloser(t, kad) // bin 0,1 balanced, rest not - for i := 0; i < 6; i++ { + for i := range 6 { var peers []swarm.Address if i < 2 { peers = mineBin(t, base, i, perBin, true) @@ -1441,7 +1441,7 @@ func TestPruneExcludeOps(t *testing.T) { kad.Reachable(peers[i], p2p.ReachabilityStatusPublic) } } - for i := 0; i < 4; i++ { + for i := range 4 { } time.Sleep(time.Millisecond * 10) kDepth(t, kad, i) @@ -1459,7 +1459,7 @@ func TestPruneExcludeOps(t *testing.T) { // check that no pruning has happened bins := binSizes(kad)
- for i := 0; i < 6; i++ { + for i := range 6 { if bins[i] <= overSaturationPeers { t.Fatalf("bin %d, got %d, want more than %d", i, bins[i], overSaturationPeers) } @@ -1484,7 +1484,7 @@ func TestPruneExcludeOps(t *testing.T) { // check bins have NOT been pruned because the peer count func excluded unreachable peers bins = binSizes(kad) - for i := uint8(0); i < 5; i++ { + for i := range uint8(5) { if bins[i] != perBin { t.Fatalf("bin %d, got %d, want %d", i, bins[i], perBin) } @@ -1501,7 +1501,7 @@ func TestBootnodeProtectedNodes(t *testing.T) { // create base and protected nodes addresses base := swarm.RandAddress(t) protected := make([]swarm.Address, 6) - for i := 0; i < 6; i++ { + for i := range 6 { addr := swarm.RandAddressAt(t, base, i) protected[i] = addr } @@ -1526,8 +1526,8 @@ func TestBootnodeProtectedNodes(t *testing.T) { testutil.CleanupCloser(t, kad) // Add maximum accepted number of peers up until bin 5 without problems - for i := 0; i < 6; i++ { - for j := 0; j < overSaturationPeers; j++ { + for i := range 6 { + for range overSaturationPeers { // if error is not nil as specified, connectOne goes fatal connectOne(t, signer, kad, ab, protected[i], nil) } @@ -1538,7 +1538,7 @@ func TestBootnodeProtectedNodes(t *testing.T) { // see depth is 5 kDepth(t, kad, 5) - for k := 0; k < 5; k++ { + for k := range 5 { // further connections should succeed outside of depth addr := swarm.RandAddressAt(t, base, k) // if error is not as specified, connectOne goes fatal @@ -1549,14 +1549,14 @@ func TestBootnodeProtectedNodes(t *testing.T) { // ensure protected node was not kicked out and we have more than oversaturation // amount sizes := binSizes(kad) - for k := 0; k < 5; k++ { + for k := range 5 { if sizes[k] != 2 { t.Fatalf("invalid bin size expected 2 found %d", sizes[k]) } } - for k := 0; k < 5; k++ { + for k := range 5 { // further connections should succeed outside of depth - for l := 0; l < 3; l++ { + for range 3 { addr := swarm.RandAddressAt(t, base, k) // if 
error is not as specified, connectOne goes fatal connectOne(t, signer, kad, ab, addr, nil) @@ -1567,7 +1567,7 @@ func TestBootnodeProtectedNodes(t *testing.T) { // ensure unprotected nodes are kicked out to make room for new peers and protected // nodes are still present sizes = binSizes(kad) - for k := 0; k < 5; k++ { + for k := range 5 { if sizes[k] != 2 { t.Fatalf("invalid bin size expected 2 found %d", sizes[k]) } @@ -1691,8 +1691,8 @@ func TestAnnounceNeighborhoodToNeighbor(t *testing.T) { testutil.CleanupCloser(t, kad) // add some peers - for bin := 0; bin < 2; bin++ { - for i := 0; i < 4; i++ { + for bin := range 2 { + for range 4 { addr := swarm.RandAddressAt(t, base, bin) addOne(t, signer, kad, ab, addr) waitCounter(t, &conns, 1) @@ -1703,7 +1703,7 @@ func TestAnnounceNeighborhoodToNeighbor(t *testing.T) { kDepth(t, kad, 1) // add many more neighbors - for i := 0; i < 10; i++ { + for range 10 { addr := swarm.RandAddressAt(t, base, 2) addOne(t, signer, kad, ab, addr) waitCounter(t, &conns, 1) @@ -1740,8 +1740,8 @@ func TestIteratorOpts(t *testing.T) { } testutil.CleanupCloser(t, kad) - for i := 0; i < 6; i++ { - for j := 0; j < 4; j++ { + for i := range 6 { + for range 4 { addr := swarm.RandAddressAt(t, base, i) // if error is not nil as specified, connectOne goes fatal connectOne(t, signer, kad, ab, addr, nil) @@ -1903,7 +1903,7 @@ func mineBin(t *testing.T, base swarm.Address, bin, count int, isBalanced bool) t.Fatal("peersCount must be greater than 8 for balanced bins") } - for i := 0; i < count; i++ { + for i := range count { rndAddrs[i] = swarm.RandAddressAt(t, base, bin) } diff --git a/pkg/topology/pslice/pslice_test.go b/pkg/topology/pslice/pslice_test.go index c7b540450b4..07eadaafdd8 100644 --- a/pkg/topology/pslice/pslice_test.go +++ b/pkg/topology/pslice/pslice_test.go @@ -24,8 +24,8 @@ func TestShallowestEmpty(t *testing.T) { peers = make([][]swarm.Address, 16) ) - for i := 0; i < 16; i++ { - for j := 0; j < 3; j++ { + for i := range 16 { + for 
range 3 { a := swarm.RandAddressAt(t, base, i) peers[i] = append(peers[i], a) } @@ -221,7 +221,7 @@ func TestIterators(t *testing.T) { ps := pslice.New(4, base) peers := make([]swarm.Address, 4) - for i := 0; i < 4; i++ { + for i := range 4 { peers[i] = swarm.RandAddressAt(t, base, i) } @@ -286,7 +286,7 @@ func TestBinPeers(t *testing.T) { // prepare slice ps := pslice.New(len(tc.peersCount), base) for bin, peersCount := range tc.peersCount { - for i := 0; i < peersCount; i++ { + for range peersCount { peer := swarm.RandAddressAt(t, base, bin) binPeers[bin] = append(binPeers[bin], peer) ps.Add(peer) @@ -342,8 +342,8 @@ func TestIteratorsJumpStop(t *testing.T) { ps := pslice.New(4, base) peers := make([]swarm.Address, 0, 12) - for i := 0; i < 4; i++ { - for ii := 0; ii < 3; ii++ { + for i := range 4 { + for range 3 { a := swarm.RandAddressAt(t, base, i) peers = append(peers, a) ps.Add(a) diff --git a/pkg/tracing/tracing_test.go b/pkg/tracing/tracing_test.go index 9c0baa2f685..c6acb330b18 100644 --- a/pkg/tracing/tracing_test.go +++ b/pkg/tracing/tracing_test.go @@ -144,7 +144,7 @@ func TestStartSpanFromContext_logger(t *testing.T) { wantTraceID := span.Context().(jaeger.SpanContext).TraceID() logger.Info("msg") - data := make(map[string]interface{}) + data := make(map[string]any) if err := json.Unmarshal(buf.Bytes(), &data); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -192,7 +192,7 @@ func TestNewLoggerWithTraceID(t *testing.T) { wantTraceID := span.Context().(jaeger.SpanContext).TraceID() logger.Info("msg") - data := make(map[string]interface{}) + data := make(map[string]any) if err := json.Unmarshal(buf.Bytes(), &data); err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/pkg/transaction/event.go b/pkg/transaction/event.go index 8eb2001ebf7..587b3d86427 100644 --- a/pkg/transaction/event.go +++ b/pkg/transaction/event.go @@ -18,7 +18,7 @@ var ( ) // ParseEvent will parse the specified abi event from the given log -func ParseEvent(a 
*abi.ABI, eventName string, c interface{}, e types.Log) error { +func ParseEvent(a *abi.ABI, eventName string, c any, e types.Log) error { if len(e.Topics) == 0 { return ErrNoTopic } @@ -37,7 +37,7 @@ func ParseEvent(a *abi.ABI, eventName string, c interface{}, e types.Log) error } // FindSingleEvent will find the first event of the given kind. -func FindSingleEvent(abi *abi.ABI, receipt *types.Receipt, contractAddress common.Address, event abi.Event, out interface{}) error { +func FindSingleEvent(abi *abi.ABI, receipt *types.Receipt, contractAddress common.Address, event abi.Event, out any) error { if receipt.Status != 1 { return ErrTransactionReverted } diff --git a/pkg/transaction/mock/transaction.go b/pkg/transaction/mock/transaction.go index d987fd52249..072f47cf8f2 100644 --- a/pkg/transaction/mock/transaction.go +++ b/pkg/transaction/mock/transaction.go @@ -171,10 +171,10 @@ type Call struct { to common.Address result []byte method string - params []interface{} + params []any } -func ABICall(abi *abi.ABI, to common.Address, result []byte, method string, params ...interface{}) Call { +func ABICall(abi *abi.ABI, to common.Address, result []byte, method string, params ...any) Call { return Call{ to: to, abi: abi, @@ -216,11 +216,11 @@ func WithABICallSequence(calls ...Call) Option { }) } -func WithABICall(abi *abi.ABI, to common.Address, result []byte, method string, params ...interface{}) Option { +func WithABICall(abi *abi.ABI, to common.Address, result []byte, method string, params ...any) Option { return WithABICallSequence(ABICall(abi, to, result, method, params...)) } -func WithABISend(abi *abi.ABI, txHash common.Hash, expectedAddress common.Address, expectedValue *big.Int, method string, params ...interface{}) Option { +func WithABISend(abi *abi.ABI, txHash common.Hash, expectedAddress common.Address, expectedValue *big.Int, method string, params ...any) Option { return optionFunc(func(s *transactionServiceMock) { s.send = func(ctx context.Context, 
request *transaction.TxRequest, boost int) (common.Hash, error) { data, err := abi.Pack(method, params...) diff --git a/pkg/transaction/transaction.go b/pkg/transaction/transaction.go index 2f2405dd432..bd63d7cfce1 100644 --- a/pkg/transaction/transaction.go +++ b/pkg/transaction/transaction.go @@ -15,6 +15,7 @@ import ( "time" "context" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" @@ -223,9 +224,7 @@ func (t *transactionService) Send(ctx context.Context, request *TxRequest, boost } func (t *transactionService) waitForPendingTx(txHash common.Hash) { - t.wg.Add(1) - go func() { - defer t.wg.Done() + t.wg.Go(func() { switch _, err := t.WaitForReceipt(t.ctx, txHash); err { case nil: t.logger.Info("pending transaction confirmed", "tx", txHash) @@ -240,7 +239,7 @@ func (t *transactionService) waitForPendingTx(txHash common.Hash) { t.logger.Error(err, "waiting for pending transaction failed", "tx", txHash) } } - }() + }) } func (t *transactionService) Call(ctx context.Context, request *TxRequest) ([]byte, error) { @@ -597,9 +596,9 @@ func (t *transactionService) UnwrapABIError(ctx context.Context, req *TxRequest, continue } - values, ok := data.([]interface{}) + values, ok := data.([]any) if !ok { - values = make([]interface{}, len(abiError.Inputs)) + values = make([]any, len(abiError.Inputs)) for i := range values { values[i] = "?" 
} diff --git a/pkg/transaction/transaction_test.go b/pkg/transaction/transaction_test.go index 079e341dd27..26c3f81111d 100644 --- a/pkg/transaction/transaction_test.go +++ b/pkg/transaction/transaction_test.go @@ -852,9 +852,9 @@ type rpcAPIError struct { err string } -func (e *rpcAPIError) ErrorCode() int { return e.code } -func (e *rpcAPIError) Error() string { return e.msg } -func (e *rpcAPIError) ErrorData() interface{} { return e.err } +func (e *rpcAPIError) ErrorCode() int { return e.code } +func (e *rpcAPIError) Error() string { return e.msg } +func (e *rpcAPIError) ErrorData() any { return e.err } var _ rpc.DataError = (*rpcAPIError)(nil) diff --git a/pkg/util/testutil/pseudorand/reader_test.go b/pkg/util/testutil/pseudorand/reader_test.go index 2abd6aaf382..90f5abe11ae 100644 --- a/pkg/util/testutil/pseudorand/reader_test.go +++ b/pkg/util/testutil/pseudorand/reader_test.go @@ -97,7 +97,7 @@ func TestReader(t *testing.T) { } }) t.Run("seek and match", func(t *testing.T) { - for i := 0; i < 20; i++ { + for range 20 { off := rand.Intn(size) n := rand.Intn(size - off) t.Run(fmt.Sprintf("off=%d n=%d", off, n), func(t *testing.T) {