From 52411d52e23cbfa9400a8a17678ff8f9ce63775f Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sun, 15 Feb 2026 15:21:50 +0100 Subject: [PATCH] modernize some code Results of running the modernize command: go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix ./... Signed-off-by: Sebastiaan van Stijn --- analyzer/analyzer.go | 6 ++--- analyzer/recorder/recorder.go | 1 - analyzer/recorder/recorder_test.go | 1 - cache/cache.go | 6 ++--- cmd/containerd-stargz-grpc/db/reader.go | 7 ++---- .../ipfs/resolvehandler.go | 2 +- cmd/stargz-store/main.go | 5 ++-- estargz/build.go | 1 - estargz/build_test.go | 3 --- estargz/estargz_test.go | 5 +--- estargz/gzip_test.go | 2 +- estargz/testutil.go | 21 ++++------------ estargz/types.go | 2 +- estargz/zstdchunked/zstdchunked_test.go | 8 +++---- fs/fs.go | 3 +-- fs/layer/layer.go | 10 ++++---- fs/layer/testutil.go | 9 +++---- fs/metrics/layer/metrics.go | 7 ++---- fs/reader/reader.go | 11 ++++----- fs/reader/testutil.go | 24 +++++++------------ fs/remote/blob.go | 2 +- fs/remote/blob_test.go | 4 ++-- fs/remote/resolver.go | 23 ++++++------------ metadata/testutil/testutil.go | 9 ++++--- .../estargz/externaltoc/converter.go | 2 +- service/keychain/cri/cri.go | 2 +- service/keychain/kubeconfig/kubeconfig.go | 6 ++--- service/plugincore/plugin.go | 2 +- service/resolver/registry.go | 6 ++--- store/manager.go | 7 ++---- store/refs.go | 2 +- util/cacheutil/lrucache.go | 12 +++++----- util/cacheutil/lrucache_test.go | 6 ++--- util/cacheutil/ttlcache.go | 6 ++--- util/cacheutil/ttlcache_test.go | 8 +++---- 35 files changed, 86 insertions(+), 145 deletions(-) diff --git a/analyzer/analyzer.go b/analyzer/analyzer.go index c65a297d4..5d76e2ada 100644 --- a/analyzer/analyzer.go +++ b/analyzer/analyzer.go @@ -157,7 +157,7 @@ func Analyze(ctx context.Context, client *containerd.Client, ref string, opts .. // Create the container and the task var container containerd.Container - for i := 0; i < 3; i++ { + for range 3 { id := xid.New().String() var s runtimespec.Spec container, err = client.NewContainer(ctx, id, @@ -221,8 +221,8 @@ func Analyze(ctx context.Context, client *containerd.Client, ref string, opts .. prePaths := preMonitor.GetPaths() for _, path := range prePaths { cleanPath := path - if strings.HasPrefix(path, target) { - cleanPath = strings.TrimPrefix(path, target) + if after, ok := strings.CutPrefix(path, target); ok { + cleanPath = after } if err := rc.Record(cleanPath); err != nil { log.G(ctx).WithError(err).Debugf("failed to record pre-container path %q", cleanPath) diff --git a/analyzer/recorder/recorder.go b/analyzer/recorder/recorder.go index dd43f4e1f..7daceb125 100644 --- a/analyzer/recorder/recorder.go +++ b/analyzer/recorder/recorder.go @@ -80,7 +80,6 @@ func imageRecorderFromManifest(ctx context.Context, cs content.Store, manifestDe var eg errgroup.Group filesMap := make([]map[string]struct{}, len(manifest.Layers)) for i, desc := range manifest.Layers { - i, desc := i, desc filesMap[i] = make(map[string]struct{}) // Create the index from the layer blob.
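The two hunks above show the most common rewrites in this patch: a counted loop whose index is unused becomes `for range N` (range over int, Go 1.22), and the strings.HasPrefix/TrimPrefix pair becomes strings.CutPrefix (Go 1.20), which reports whether the prefix was present and returns the remainder in one call. A minimal standalone sketch; the path and retry count are illustrative, not values from this repository:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// CutPrefix replaces the HasPrefix check followed by TrimPrefix.
    	path := "/mnt/target/etc/hosts" // hypothetical rootfs path
    	if after, ok := strings.CutPrefix(path, "/mnt/target"); ok {
    		fmt.Println(after) // "/etc/hosts"
    	}

    	// Ranging over an int replaces `for i := 0; i < 3; i++` when the
    	// index itself is never read.
    	for range 3 {
    		fmt.Println("attempting container creation")
    	}
    }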
diff --git a/analyzer/recorder/recorder_test.go b/analyzer/recorder/recorder_test.go index f8609d78b..6fdc4d655 100644 --- a/analyzer/recorder/recorder_test.go +++ b/analyzer/recorder/recorder_test.go @@ -217,7 +217,6 @@ func TestNodeIndex(t *testing.T) { ctx := context.Background() for _, tt := range tests { for _, prefix := range allowedPrefix { - prefix := prefix for mediatype, cWrapper := range compressWrappers { t.Run(tt.name+":"+mediatype+",prefix="+prefix, func(t *testing.T) { var layers []ocispec.Descriptor diff --git a/cache/cache.go b/cache/cache.go index 745d95fdc..34101b5ce 100644 --- a/cache/cache.go +++ b/cache/cache.go @@ -134,7 +134,7 @@ func NewDirectoryCache(directory string, config DirectoryCacheConfig) (BlobCache bufPool := config.BufPool if bufPool == nil { bufPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, } @@ -146,7 +146,7 @@ func NewDirectoryCache(directory string, config DirectoryCacheConfig) (BlobCache maxEntry = defaultMaxLRUCacheEntry } dataCache = cacheutil.NewLRUCache(maxEntry) - dataCache.OnEvicted = func(key string, value interface{}) { + dataCache.OnEvicted = func(key string, value any) { value.(*bytes.Buffer).Reset() bufPool.Put(value) } @@ -158,7 +158,7 @@ func NewDirectoryCache(directory string, config DirectoryCacheConfig) (BlobCache maxEntry = defaultMaxCacheFds } fdCache = cacheutil.NewLRUCache(maxEntry) - fdCache.OnEvicted = func(key string, value interface{}) { + fdCache.OnEvicted = func(key string, value any) { value.(*os.File).Close() } } diff --git a/cmd/containerd-stargz-grpc/db/reader.go b/cmd/containerd-stargz-grpc/db/reader.go index afac05fc6..407fe3a16 100644 --- a/cmd/containerd-stargz-grpc/db/reader.go +++ b/cmd/containerd-stargz-grpc/db/reader.go @@ -221,7 +221,7 @@ func (r *reader) init(decompressedR io.Reader, rOpts metadata.Options) (retErr e // Initialize root node var ok bool - for i := 0; i < 100; i++ { + for range 100 { fsID := xid.New().String() if err := r.initRootNode(fsID); err != nil { if errors.Is(err, errbolt.ErrBucketExists) { @@ -930,10 +930,7 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { } compressedBytesRemain := fr.nextOffset - ent.offset - bufSize := int(2 << 20) - if bufSize > int(compressedBytesRemain) { - bufSize = int(compressedBytesRemain) - } + bufSize := min(int(2<<20), int(compressedBytesRemain)) br := bufio.NewReaderSize(io.NewSectionReader(fr.r.sr, ent.offset, compressedBytesRemain), bufSize) if _, err := br.Peek(bufSize); err != nil { diff --git a/cmd/containerd-stargz-grpc/ipfs/resolvehandler.go b/cmd/containerd-stargz-grpc/ipfs/resolvehandler.go index 0e06ac8a1..8321e8474 100644 --- a/cmd/containerd-stargz-grpc/ipfs/resolvehandler.go +++ b/cmd/containerd-stargz-grpc/ipfs/resolvehandler.go @@ -74,6 +74,6 @@ func (f *fetcher) Check() error { } func (f *fetcher) GenID(off int64, size int64) string { - sum := sha256.Sum256([]byte(fmt.Sprintf("%s-%d-%d", f.cid, off, size))) + sum := sha256.Sum256(fmt.Appendf(nil, "%s-%d-%d", f.cid, off, size)) return fmt.Sprintf("%x", sum) } diff --git a/cmd/stargz-store/main.go b/cmd/stargz-store/main.go index 5727bc94b..0b5df3c11 100644 --- a/cmd/stargz-store/main.go +++ b/cmd/stargz-store/main.go @@ -25,6 +25,7 @@ import ( "fmt" "io" golog "log" + "maps" "math/rand" "net" "os" @@ -250,9 +251,7 @@ func (sk *storeKeychain) add(data []byte) error { if sk.config == nil { sk.config = make(map[string]authConfig) } - for k, c := range conf { - sk.config[k] = c - } + maps.Copy(sk.config, conf) 
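The cache and reader changes above are mechanical swaps to newer stdlib APIs: `any` for `interface{}` (an alias since Go 1.18), the min builtin (Go 1.21) for a hand-rolled clamp, maps.Copy (Go 1.21) for a key-by-key copy loop, and fmt.Appendf (Go 1.19) to format straight into a byte slice instead of going through []byte(fmt.Sprintf(...)). A self-contained sketch with made-up values:

    package main

    import (
    	"crypto/sha256"
    	"fmt"
    	"maps"
    )

    func main() {
    	// min replaces: bufSize := 2 << 20; if bufSize > remain { bufSize = remain }
    	remain := 4096
    	bufSize := min(2<<20, remain)

    	// maps.Copy replaces: for k, v := range conf { dst[k] = v }
    	dst := map[string]string{"registry-1.docker.io": "user1"}
    	maps.Copy(dst, map[string]string{"ghcr.io": "user2"})

    	// fmt.Appendf builds the digest input without an intermediate string.
    	sum := sha256.Sum256(fmt.Appendf(nil, "%s-%d-%d", "some-cid", 0, bufSize))
    	fmt.Printf("%d %v %x\n", bufSize, dst, sum[:4])
    }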
sk.configMu.Unlock() return nil } diff --git a/estargz/build.go b/estargz/build.go index a9e1b72ba..976fd4aff 100644 --- a/estargz/build.go +++ b/estargz/build.go @@ -238,7 +238,6 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { var mu sync.Mutex var eg errgroup.Group for i, parts := range tarParts { - i, parts := i, parts // builds verifiable stargz sub-blobs eg.Go(func() error { esgzFile, err := layerFiles.TempFile("", "esgzdata") diff --git a/estargz/build_test.go b/estargz/build_test.go index 805da3034..f2a0aef90 100644 --- a/estargz/build_test.go +++ b/estargz/build_test.go @@ -354,11 +354,8 @@ func TestSort(t *testing.T) { } for _, tt := range tests { for _, srcCompression := range srcCompressions { - srcCompression := srcCompression for _, logprefix := range allowedPrefix { - logprefix := logprefix for _, tarprefix := range allowedPrefix { - tarprefix := tarprefix t.Run(fmt.Sprintf("%s-logprefix=%q-tarprefix=%q-src=%d", tt.name, logprefix, tarprefix, srcCompression), func(t *testing.T) { // Sort tar file var pfiles []string diff --git a/estargz/estargz_test.go b/estargz/estargz_test.go index c1a8059b1..a2bfe9906 100644 --- a/estargz/estargz_test.go +++ b/estargz/estargz_test.go @@ -101,10 +101,7 @@ func regularFileReader(name string, size int64, chunkSize int64) (*TOCEntry, *Re var written int64 for written < size { remain := size - written - cs := chunkSize - if remain < cs { - cs = remain - } + cs := min(remain, chunkSize) ent.ChunkSize = cs ent.ChunkOffset = written chunks = append(chunks, ent) diff --git a/estargz/gzip_test.go b/estargz/gzip_test.go index 44946f892..18e073612 100644 --- a/estargz/gzip_test.go +++ b/estargz/gzip_test.go @@ -121,7 +121,7 @@ func checkLegacyFooter(t *testing.T, off int64) { func legacyFooterBytes(tocOff int64) []byte { buf := bytes.NewBuffer(make([]byte, 0, legacyFooterSize)) gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) - gz.Extra = []byte(fmt.Sprintf("%016xSTARGZ", tocOff)) + gz.Extra = fmt.Appendf(nil, "%016xSTARGZ", tocOff) gz.Close() if buf.Len() != legacyFooterSize { panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), legacyFooterSize)) diff --git a/estargz/testutil.go b/estargz/testutil.go index ff165e090..73b37ec04 100644 --- a/estargz/testutil.go +++ b/estargz/testutil.go @@ -187,15 +187,10 @@ func testBuild(t *TestRunner, controllers ...TestingControllerFactory) { tt.minChunkSize = []int{0} } for _, srcCompression := range srcCompressions { - srcCompression := srcCompression for _, newCL := range controllers { - newCL := newCL for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { - srcTarFormat := srcTarFormat for _, prefix := range allowedPrefix { - prefix := prefix for _, minChunkSize := range tt.minChunkSize { - minChunkSize := minChunkSize t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *TestRunner) { tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) // Test divideEntries() @@ -675,15 +670,10 @@ func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) tt.minChunkSize = []int{0} } for _, srcCompression := range srcCompressions { - srcCompression := srcCompression for _, newCL := range controllers { - newCL := newCL for _, prefix := range allowedPrefix { - prefix := prefix for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { - srcTarFormat := srcTarFormat for _, minChunkSize := 
range tt.minChunkSize { - minChunkSize := minChunkSize t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *TestRunner) { // Get original tar file and chunk digests dgstMap := make(map[string]digest.Digest) @@ -1488,11 +1478,8 @@ func testWriteAndOpen(t *TestRunner, controllers ...TestingControllerFactory) { for _, tt := range tests { for _, newCL := range controllers { - newCL := newCL for _, prefix := range allowedPrefix { - prefix := prefix for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { - srcTarFormat := srcTarFormat for _, lossless := range []bool{true, false} { t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *TestRunner) { var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) @@ -2072,7 +2059,7 @@ func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format return f(tw, prefix, format) } -func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { +func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...any) *io.SectionReader { format := tar.FormatUnknown for _, opt := range opts { switch v := opt.(type) { @@ -2096,7 +2083,7 @@ func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...interface{}) * return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data))) } -func dir(name string, opts ...interface{}) tarEntry { +func dir(name string, opts ...any) tarEntry { return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { var o owner mode := os.FileMode(0755) @@ -2137,7 +2124,7 @@ type owner struct { gid int } -func file(name, contents string, opts ...interface{}) tarEntry { +func file(name, contents string, opts ...any) tarEntry { return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { var xattrs xAttr var o owner @@ -2349,7 +2336,7 @@ func (f fileInfoOnlyMode) Size() int64 { return 0 } func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } -func (f fileInfoOnlyMode) Sys() interface{} { return nil } +func (f fileInfoOnlyMode) Sys() any { return nil } func CheckGzipHasStreams(t TestingT, b []byte, streams []int64) { if len(streams) == 0 { diff --git a/estargz/types.go b/estargz/types.go index 57e0aa614..41eb6571f 100644 --- a/estargz/types.go +++ b/estargz/types.go @@ -246,7 +246,7 @@ func (fi fileInfo) Name() string { return path.Base(fi.e.Name) } func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" } func (fi fileInfo) Size() int64 { return fi.e.Size } func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() } -func (fi fileInfo) Sys() interface{} { return fi.e } +func (fi fileInfo) Sys() any { return fi.e } func (fi fileInfo) Mode() (m os.FileMode) { // TOCEntry.Mode is tar.Header.Mode so we can understand the these bits using `tar` pkg. 
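All of the deleted `x := x` lines in the test loops above were loop-variable copies. They are safe to remove because Go 1.22 changed for-loop semantics: each iteration declares a fresh variable, so closures passed to t.Run or launched as goroutines no longer share (and race on) a single variable. A small sketch of the pattern the copies used to guard against; the names here are illustrative:

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	var wg sync.WaitGroup
    	for _, prefix := range []string{"a/", "b/", "c/"} {
    		// Before Go 1.22 this loop needed `prefix := prefix`, or every
    		// goroutine could observe the final value "c/".
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			fmt.Println(prefix)
    		}()
    	}
    	wg.Wait()
    }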
m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() & diff --git a/estargz/zstdchunked/zstdchunked_test.go b/estargz/zstdchunked/zstdchunked_test.go index c068a97fd..17d5722f4 100644 --- a/estargz/zstdchunked/zstdchunked_test.go +++ b/estargz/zstdchunked/zstdchunked_test.go @@ -21,7 +21,7 @@ import ( "crypto/sha256" "fmt" "io" - "sort" + "slices" "testing" "github.com/containerd/stargz-snapshotter/estargz" @@ -79,9 +79,7 @@ func (zc *zstdController) TestStreams(t estargz.TestingT, b []byte, streams []in // We expect the last offset is footer offset. // 8 is the size of the zstd skippable frame header + the frame size (see WriteTOCAndFooter) - sort.Slice(streams, func(i, j int) bool { - return streams[i] < streams[j] - }) + slices.Sort(streams) streams[len(streams)-1] = streams[len(streams)-1] - 8 wants := map[int64]struct{}{} for _, s := range streams { @@ -127,7 +125,7 @@ func (zc *zstdController) TestStreams(t estargz.TestingT, b []byte, streams []in } func nextIndex(s1, sub []byte) int { - for i := 0; i < len(s1); i++ { + for i := range s1 { if len(s1)-i < len(sub) { return -1 } else if bytes.Equal(s1[i:i+len(sub)], sub) { diff --git a/fs/fs.go b/fs/fs.go index c55d57002..8633844a6 100644 --- a/fs/fs.go +++ b/fs/fs.go @@ -262,7 +262,6 @@ func (fs *filesystem) Mount(ctx context.Context, mountpoint string, labels map[s // Also resolve and cache other layers in parallel preResolve := src[0] // TODO: should we pre-resolve blobs in other sources as well? for _, desc := range neighboringLayers(preResolve.Manifest, preResolve.Target) { - desc := desc go func() { // Avoids to get canceled by client. ctx := log.WithLogger(context.Background(), log.G(ctx).WithField("mountpoint", mountpoint)) @@ -416,7 +415,7 @@ func (fs *filesystem) check(ctx context.Context, l layer.Layer, labels map[strin retrynum = 1 rErr = fmt.Errorf("failed to refresh connection") ) - for retry := 0; retry < retrynum; retry++ { + for retry := range retrynum { log.G(ctx).Warnf("refreshing(%d)...", retry) for _, s := range src { err := l.Refresh(ctx, s.Hosts, s.Name, s.Target) diff --git a/fs/layer/layer.go b/fs/layer/layer.go index 11cd9be88..a4d57ba4d 100644 --- a/fs/layer/layer.go +++ b/fs/layer/layer.go @@ -158,7 +158,7 @@ func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, // the filesystem resolves and caches all layers in an image (not only queried one) in parallel, // before they are actually queried. layerCache := cacheutil.NewTTLCache(resolveResultEntryTTL) - layerCache.OnEvicted = func(key string, value interface{}) { + layerCache.OnEvicted = func(key string, value any) { if err := value.(*layer).close(); err != nil { log.L.WithField("key", key).WithError(err).Warnf("failed to clean up layer") return @@ -169,7 +169,7 @@ func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, // blobCache caches resolved blobs for futural use. This is especially useful when a layer // isn't eStargz/stargz (the *layer object won't be created/cached in this case). 
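For slices of ordered element types, slices.Sort (Go 1.21) needs no comparator, which is why the sort.Slice callback above collapses to a single call; slices.Contains likewise replaces the hand-written membership loop removed later in fs/reader/testutil.go. A standalone sketch with arbitrary offsets:

    package main

    import (
    	"fmt"
    	"slices"
    )

    func main() {
    	streams := []int64{42, 7, 19}
    	// Replaces sort.Slice(streams, func(i, j int) bool { return streams[i] < streams[j] }).
    	slices.Sort(streams)
    	fmt.Println(streams) // [7 19 42]

    	// Replaces a manual `for _, n := range ... { if n == v ... }` search.
    	fmt.Println(slices.Contains(streams, int64(19))) // true
    }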
blobCache := cacheutil.NewTTLCache(resolveResultEntryTTL) - blobCache.OnEvicted = func(key string, value interface{}) { + blobCache.OnEvicted = func(key string, value any) { if err := value.(remote.Blob).Close(); err != nil { log.L.WithField("key", key).WithError(err).Warnf("failed to clean up blob") return @@ -212,16 +212,16 @@ func newCache(root string, cacheType string, cfg config.Config) (cache.BlobCache } bufPool := &sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, } dCache, fCache := cacheutil.NewLRUCache(maxDataEntry), cacheutil.NewLRUCache(maxFdEntry) - dCache.OnEvicted = func(key string, value interface{}) { + dCache.OnEvicted = func(key string, value any) { value.(*bytes.Buffer).Reset() bufPool.Put(value) } - fCache.OnEvicted = func(key string, value interface{}) { + fCache.OnEvicted = func(key string, value any) { value.(*os.File).Close() } // create a cache on an unique directory diff --git a/fs/layer/testutil.go b/fs/layer/testutil.go index a2609237c..de0c96539 100644 --- a/fs/layer/testutil.go +++ b/fs/layer/testutil.go @@ -252,10 +252,7 @@ func testPrefetch(t *TestRunner, factory metadata.Store, lc layerConfig) { if tt.chunkSize > 0 { chunkSize = tt.chunkSize } - minChunkSize := 0 - if tt.minChunkSize > 0 { - minChunkSize = tt.minChunkSize - } + minChunkSize := max(tt.minChunkSize, 0) sr, dgst, err := tutil.BuildEStargz(tt.in, tutil.WithEStargzOptions( estargz.WithChunkSize(chunkSize), @@ -1127,7 +1124,7 @@ func getDirentAndNode(t TestingT, root *node, path string) (ent fuse.DirEntry, n // get the target's parent directory. var eo fuse.EntryOut d := root - for _, name := range strings.Split(dir, "/") { + for name := range strings.SplitSeq(dir, "/") { if len(name) == 0 { continue } @@ -1169,7 +1166,7 @@ func getDirent(t TestingT, root *node, path string) (ent fuse.DirEntry, err erro // get the target's parent directory. 
var eo fuse.EntryOut d := root - for _, name := range strings.Split(dir, "/") { + for name := range strings.SplitSeq(dir, "/") { if len(name) == 0 { continue } diff --git a/fs/metrics/layer/metrics.go b/fs/metrics/layer/metrics.go index 00d9bbaf4..d5f1fdb8d 100644 --- a/fs/metrics/layer/metrics.go +++ b/fs/metrics/layer/metrics.go @@ -55,14 +55,11 @@ func (c *Controller) Collect(ch chan<- prometheus.Metric) { c.layerMu.RLock() wg := &sync.WaitGroup{} for mp, l := range c.layer { - mp, l := mp, l - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { for _, e := range c.metrics { e.collect(mp, l, c.ns, ch) } - }() + }) } c.layerMu.RUnlock() wg.Wait() diff --git a/fs/reader/reader.go b/fs/reader/reader.go index 928ec4e6d..5a8fbb562 100644 --- a/fs/reader/reader.go +++ b/fs/reader/reader.go @@ -311,7 +311,7 @@ func NewReader(r metadata.Reader, cache cache.BlobCache, layerSha digest.Digest) r: r, cache: cache, bufPool: sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, }, @@ -642,12 +642,9 @@ func (sf *file) prefetchEntireFile(entireCacheID string, chunks []chunkData, tot batchCount := (totalSize + bufferSize - 1) / bufferSize - for batchIdx := int64(0); batchIdx < batchCount; batchIdx++ { + for batchIdx := range batchCount { batchStart := batchIdx * bufferSize - batchEnd := (batchIdx + 1) * bufferSize - if batchEnd > totalSize { - batchEnd = totalSize - } + batchEnd := min((batchIdx+1)*bufferSize, totalSize) var batchChunks []chunkData var batchOffset int64 @@ -841,7 +838,7 @@ func (gr *reader) verifyChunk(id uint32, p []byte, chunkDigestStr string) error } func genID(id uint32, offset, size int64) string { - sum := sha256.Sum256([]byte(fmt.Sprintf("%d-%d-%d", id, offset, size))) + sum := sha256.Sum256(fmt.Appendf(nil, "%d-%d-%d", id, offset, size)) return fmt.Sprintf("%x", sum) } diff --git a/fs/reader/testutil.go b/fs/reader/testutil.go index 89d858dc0..c85085427 100644 --- a/fs/reader/testutil.go +++ b/fs/reader/testutil.go @@ -27,9 +27,11 @@ import ( "compress/gzip" "fmt" "io" + "maps" "os" "path" "path/filepath" + "slices" "strings" "sync" "time" @@ -443,13 +445,7 @@ func (f *failIDVerifier) registerFails(fails []uint32) { func (f *failIDVerifier) verifier(id uint32, chunkDigest string) (digest.Verifier, error) { f.failsMu.Lock() defer f.failsMu.Unlock() - success := true - for _, n := range f.fails { - if n == id { - success = false - break - } - } + success := !slices.Contains(f.fails, id) return &testVerifier{success}, nil } @@ -495,12 +491,8 @@ func prepareMap(mr metadata.Reader, id uint32, p string) (off2id map[int64]uint3 retErr = err return false } - for k, v := range o2i { - off2id[k] = v - } - for k, v := range i2p { - id2path[k] = v - } + maps.Copy(off2id, o2i) + maps.Copy(id2path, i2p) return true }) if retErr != nil { @@ -548,7 +540,7 @@ func testFailReader(t *TestRunner, factory metadata.Store) { notexist := uint32(0) found := false - for i := uint32(0); i < 1000000; i++ { + for i := range uint32(1000000) { if _, err := gr.Metadata().GetAttr(i); err != nil { notexist, found = i, true break @@ -968,7 +960,7 @@ func testProcessBatchChunks(t *TestRunner) { createNormalChunks := func(chunkSize int64, totalChunks int) []chunkData { var chunks []chunkData - for i := 0; i < totalChunks; i++ { + for i := range totalChunks { chunks = append(chunks, chunkData{ offset: int64(i) * chunkSize, size: chunkSize, @@ -982,7 +974,7 @@ func testProcessBatchChunks(t *TestRunner) { createOverlappingChunks := func(chunkSize int64, totalChunks int) 
[]chunkData { chunks := createNormalChunks(chunkSize, totalChunks) - for i := 0; i < totalChunks; i++ { + for i := range totalChunks { if i > 0 && i%10 == 0 { chunks = append(chunks, chunkData{ offset: int64(i)*chunkSize - chunkSize/2, diff --git a/fs/remote/blob.go b/fs/remote/blob.go index 4c443e671..dac9948c5 100644 --- a/fs/remote/blob.go +++ b/fs/remote/blob.go @@ -386,7 +386,7 @@ func (b *blob) fetchRange(allData map[region]io.Writer, opts *options) error { key := makeSyncKey(allData) fetched := make(map[region]bool) - _, err, shared := b.fetchedRegionGroup.Do(key, func() (interface{}, error) { + _, err, shared := b.fetchedRegionGroup.Do(key, func() (any, error) { return nil, b.fetchRegions(allData, fetched, opts) }) diff --git a/fs/remote/blob_test.go b/fs/remote/blob_test.go index a49426f8c..32282573a 100644 --- a/fs/remote/blob_test.go +++ b/fs/remote/blob_test.go @@ -512,7 +512,7 @@ func TestParallelDownloadingBehavior(t *testing.T) { wg.Add(routines) var contentBytes [3][]byte - for i := 0; i < routines; i++ { + for i := range routines { p := make([]byte, len(tst.content)) contentBytes[i] = p allData := make(map[region]io.Writer) @@ -673,7 +673,7 @@ type bodyConverter func(r io.ReadCloser) io.ReadCloser type exceptChunks []region type allowMultiRange bool -func multiRoundTripper(t *testing.T, contents []byte, opts ...interface{}) RoundTripFunc { +func multiRoundTripper(t *testing.T, contents []byte, opts ...any) RoundTripFunc { multiRangeEnable := true doNotFetch := []region{} convertBody := func(r io.ReadCloser) io.ReadCloser { return r } diff --git a/fs/remote/resolver.go b/fs/remote/resolver.go index 392cda087..c57e7898e 100644 --- a/fs/remote/resolver.go +++ b/fs/remote/resolver.go @@ -29,6 +29,7 @@ import ( "errors" "fmt" "io" + "maps" "math/big" "mime" "mime/multipart" @@ -333,9 +334,7 @@ func redirect(ctx context.Context, blobURL string, tr http.RoundTripper, timeout return "", nil, fmt.Errorf("failed to make request to the registry: %w", err) } req.Header = http.Header{} - for k, v := range header { - req.Header[k] = v - } + maps.Copy(req.Header, header) req.Close = false req.Header.Set("Range", "bytes=0-1") res, err := tr.RoundTrip(req) @@ -372,9 +371,7 @@ func getSize(ctx context.Context, url string, tr http.RoundTripper, timeout time return 0, err } req.Header = http.Header{} - for k, v := range header { - req.Header[k] = v - } + maps.Copy(req.Header, header) req.Close = false res, err := tr.RoundTrip(req) if err != nil { @@ -394,9 +391,7 @@ func getSize(ctx context.Context, url string, tr http.RoundTripper, timeout time return 0, fmt.Errorf("failed to make request to the registry: %w", err) } req.Header = http.Header{} - for k, v := range header { - req.Header[k] = v - } + maps.Copy(req.Header, header) req.Close = false req.Header.Set("Range", "bytes=0-1") res, err = tr.RoundTrip(req) @@ -471,9 +466,7 @@ func (f *httpFetcher) fetch(ctx context.Context, rs []region, retry bool) (multi return nil, err } req.Header = http.Header{} - for k, v := range f.header { - req.Header[k] = v - } + maps.Copy(req.Header, f.header) var ranges string for _, reg := range requests { ranges += fmt.Sprintf("%d-%d,", reg.b, reg.e) @@ -546,9 +539,7 @@ func (f *httpFetcher) check() error { return fmt.Errorf("check failed: failed to make request: %w", err) } req.Header = http.Header{} - for k, v := range f.header { - req.Header[k] = v - } + maps.Copy(req.Header, f.header) req.Close = false req.Header.Set("Range", "bytes=0-1") res, err := f.tr.RoundTrip(req) @@ -592,7 +583,7 @@ func (f 
*httpFetcher) refreshURL(ctx context.Context) error { } func (f *httpFetcher) genID(reg region) string { - sum := sha256.Sum256([]byte(fmt.Sprintf("%s-%d-%d", f.blobURL, reg.b, reg.e))) + sum := sha256.Sum256(fmt.Appendf(nil, "%s-%d-%d", f.blobURL, reg.b, reg.e)) return fmt.Sprintf("%x", sum) } diff --git a/metadata/testutil/testutil.go b/metadata/testutil/testutil.go index afb20876a..b164d54ef 100644 --- a/metadata/testutil/testutil.go +++ b/metadata/testutil/testutil.go @@ -312,7 +312,6 @@ func TestReader(t *TestRunner, factory ReaderFactory) { } for _, tt := range tests { for _, prefix := range allowedPrefix { - prefix := prefix for srcCompresionName, srcCompression := range srcCompressions { srcCompression := srcCompression() @@ -458,11 +457,11 @@ func newCalledTelemetry() (telemetry *metadata.Telemetry, check func() error) { func dumpNodes(t TestingT, r TestableReader, id uint32, level int) { if err := r.ForeachChild(id, func(name string, id uint32, mode os.FileMode) bool { - ind := "" - for i := 0; i < level; i++ { - ind += " " + var ind strings.Builder + for range level { + ind.WriteString(" ") } - t.Logf("%v+- [%d] %q : %v", ind, id, name, mode) + t.Logf("%v+- [%d] %q : %v", ind.String(), id, name, mode) dumpNodes(t, r, id, level+1) return true }); err != nil { diff --git a/nativeconverter/estargz/externaltoc/converter.go b/nativeconverter/estargz/externaltoc/converter.go index f6539b234..439f8630c 100644 --- a/nativeconverter/estargz/externaltoc/converter.go +++ b/nativeconverter/estargz/externaltoc/converter.go @@ -374,7 +374,7 @@ func createManifest(ctx context.Context, cs content.Store, config ocispec.ImageC Size: mfstSize, }, nil } -func writeJSON(ctx context.Context, cs content.Store, data interface{}, labels map[string]string) (digest.Digest, int64, error) { +func writeJSON(ctx context.Context, cs content.Store, data any, labels map[string]string) (digest.Digest, int64, error) { raw, err := json.Marshal(data) if err != nil { return "", 0, err diff --git a/service/keychain/cri/cri.go b/service/keychain/cri/cri.go index a1c87555f..8b1804c71 100644 --- a/service/keychain/cri/cri.go +++ b/service/keychain/cri/cri.go @@ -37,7 +37,7 @@ func NewCRIKeychain(ctx context.Context, connectCRI func() (runtime.ImageService server := &instrumentedService{config: make(map[string]*runtime.AuthConfig)} go func() { log.G(ctx).Debugf("Waiting for CRI service is started...") - for i := 0; i < 100; i++ { + for range 100 { client, err := connectCRI() if err == nil { server.criMu.Lock() diff --git a/service/keychain/kubeconfig/kubeconfig.go b/service/keychain/kubeconfig/kubeconfig.go index 78821b1f0..c804c9503 100644 --- a/service/keychain/kubeconfig/kubeconfig.go +++ b/service/keychain/kubeconfig/kubeconfig.go @@ -204,19 +204,19 @@ func (kc *keychain) startSyncSecrets(ctx context.Context, client kubernetes.Inte } }() informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { key, err := cache.MetaNamespaceKeyFunc(obj) if err == nil { queue.Add(key) } }, - UpdateFunc: func(old, new interface{}) { + UpdateFunc: func(old, new any) { key, err := cache.MetaNamespaceKeyFunc(new) if err == nil { queue.Add(key) } }, - DeleteFunc: func(obj interface{}) { + DeleteFunc: func(obj any) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err == nil { queue.Add(key) diff --git a/service/plugincore/plugin.go b/service/plugincore/plugin.go index 9c3a2c728..b671a1d1c 100644 --- a/service/plugincore/plugin.go +++ b/service/plugincore/plugin.go 
@@ -61,7 +61,7 @@ func RegisterPlugin() { Type: ctdplugins.SnapshotPlugin, ID: "stargz", Config: &Config{}, - InitFn: func(ic *plugin.InitContext) (interface{}, error) { + InitFn: func(ic *plugin.InitContext) (any, error) { ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) ctx := ic.Context diff --git a/service/resolver/registry.go b/service/resolver/registry.go index 6ca780ed2..b9b1fc266 100644 --- a/service/resolver/registry.go +++ b/service/resolver/registry.go @@ -56,7 +56,7 @@ type MirrorConfig struct { RequestTimeoutSec int `toml:"request_timeout_sec" json:"request_timeout_sec"` // Header are additional headers to send to the server - Header map[string]interface{} `toml:"header" json:"header"` + Header map[string]any `toml:"header" json:"header"` } type Credential func(string, reference.Spec) (string, string, error) @@ -90,7 +90,7 @@ func RegistryHostsFromConfig(cfg Config, credsFuncs ...Credential) source.Regist switch value := ty.(type) { case string: header[key] = []string{value} - case []interface{}: + case []any: header[key], err = makeStringSlice(value, nil) if err != nil { return nil, err @@ -139,7 +139,7 @@ func multiCredsFuncs(ref reference.Spec, credsFuncs ...Credential) func(string) // makeStringSlice is a helper func to convert from []interface{} to []string. // Additionally an optional cb func may be passed to perform string mapping. // NOTE: Ported from https://github.com/containerd/containerd/blob/v1.6.9/remotes/docker/config/hosts.go#L516-L533 -func makeStringSlice(slice []interface{}, cb func(string) string) ([]string, error) { +func makeStringSlice(slice []any, cb func(string) string) ([]string, error) { out := make([]string, len(slice)) for i, value := range slice { str, ok := value.(string) diff --git a/store/manager.go b/store/manager.go index b6c75f7d9..0de12b763 100644 --- a/store/manager.go +++ b/store/manager.go @@ -168,12 +168,9 @@ func (r *LayerManager) getLayer(ctx context.Context, refspec reference.Spec, toc return nil, fmt.Errorf("failed to get manifest and config: %w", err) } for _, l := range manifest.Layers { - l := l // Resolve the layer - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { // Avoids to get canceled by client. ctx := context.Background() if err := r.resolveLayer(ctx, refspec, l); err != nil { @@ -189,7 +186,7 @@ func (r *LayerManager) getLayer(ctx context.Context, refspec reference.Spec, toc // Log this as preparation success log.G(ctx).WithField(remoteSnapshotLogKey, prepareSucceeded).Debugf("successfully resolved layer") resultChan <- gotL - }() + }) } allDone := make(chan struct{}) diff --git a/store/refs.go b/store/refs.go index fc737c49b..e001a513d 100644 --- a/store/refs.go +++ b/store/refs.go @@ -52,7 +52,7 @@ func newRefPool(ctx context.Context, root string, hosts source.RegistryHosts) (* refcounter: make(map[string]*releaser), } p.cache = cacheutil.NewLRUCache(refCacheEntry) - p.cache.OnEvicted = func(key string, value interface{}) { + p.cache.OnEvicted = func(key string, value any) { refspec := value.(reference.Spec) if err := os.RemoveAll(p.metadataDir(refspec)); err != nil { log.G(ctx).WithField("key", key).WithError(err).Warnf("failed to clean up ref") diff --git a/util/cacheutil/lrucache.go b/util/cacheutil/lrucache.go index b43b49194..fdb6938f2 100644 --- a/util/cacheutil/lrucache.go +++ b/util/cacheutil/lrucache.go @@ -31,13 +31,13 @@ type LRUCache struct { // OnEvicted optionally specifies a callback function to be // executed when an entry is purged from the cache. 
- OnEvicted func(key string, value interface{}) + OnEvicted func(key string, value any) } // NewLRUCache creates new lru cache. func NewLRUCache(maxEntries int) *LRUCache { inner := lru.New(maxEntries) - inner.OnEvicted = func(key lru.Key, value interface{}) { + inner.OnEvicted = func(key lru.Key, value any) { // Decrease the ref count incremented in Add(). // When nobody refers to this value, this value will be finalized via refCounter. value.(*refCounter).finalize() @@ -50,7 +50,7 @@ func NewLRUCache(maxEntries int) *LRUCache { // Get retrieves the specified object from the cache and increments the reference counter of the // target content. Client must call `done` callback to decrease the reference count when the value // will no longer be used. -func (c *LRUCache) Get(key string) (value interface{}, done func(), ok bool) { +func (c *LRUCache) Get(key string) (value any, done func(), ok bool) { c.mu.Lock() defer c.mu.Unlock() o, ok := c.cache.Get(key) @@ -66,7 +66,7 @@ func (c *LRUCache) Get(key string) (value interface{}, done func(), ok bool) { // If the specified content already exists in the cache, this sets `added` to false and returns // "already cached" content (i.e. doesn't replace the content with the new one). Client must call // `done` callback to decrease the counter when the value will no longer be used. -func (c *LRUCache) Add(key string, value interface{}) (cachedValue interface{}, done func(), added bool) { +func (c *LRUCache) Add(key string, value any) (cachedValue any, done func(), added bool) { c.mu.Lock() defer c.mu.Unlock() if o, ok := c.cache.Get(key); ok { @@ -103,10 +103,10 @@ func (c *LRUCache) decreaseOnceFunc(rc *refCounter) func() { } type refCounter struct { - onEvicted func(key string, value interface{}) + onEvicted func(key string, value any) key string - v interface{} + v any refCounts int64 mu sync.Mutex diff --git a/util/cacheutil/lrucache_test.go b/util/cacheutil/lrucache_test.go index 76b203d57..eba55ecff 100644 --- a/util/cacheutil/lrucache_test.go +++ b/util/cacheutil/lrucache_test.go @@ -65,7 +65,7 @@ func TestLRUGet(t *testing.T) { func TestLRURemove(t *testing.T) { var evicted []string c := NewLRUCache(2) - c.OnEvicted = func(key string, value interface{}) { + c.OnEvicted = func(key string, value any) { evicted = append(evicted, key) } key1, value1 := "key1", "abcd1" @@ -95,7 +95,7 @@ func TestLRURemove(t *testing.T) { func TestLRUEviction(t *testing.T) { var evicted []string c := NewLRUCache(2) - c.OnEvicted = func(key string, value interface{}) { + c.OnEvicted = func(key string, value any) { evicted = append(evicted, key) } key1, value1 := "key1", "abcd1" @@ -107,7 +107,7 @@ func TestLRUEviction(t *testing.T) { if len(evicted) != 0 { t.Fatalf("no content must be evicted after addition") } - for i := 0; i < 2; i++ { + for i := range 2 { c.Add(fmt.Sprintf("key-add-%d", i), fmt.Sprintf("abcd-add-%d", i)) } if len(evicted) != 0 { diff --git a/util/cacheutil/ttlcache.go b/util/cacheutil/ttlcache.go index a55dc254e..7be125ee9 100644 --- a/util/cacheutil/ttlcache.go +++ b/util/cacheutil/ttlcache.go @@ -30,7 +30,7 @@ type TTLCache struct { // OnEvicted optionally specifies a callback function to be // executed when an entry is purged from the cache. - OnEvicted func(key string, value interface{}) + OnEvicted func(key string, value any) } // NewTTLCache creates a new ttl-based cache. 
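Two strings-package rewrites appear earlier in this patch: strings.SplitSeq (Go 1.24, in fs/layer/testutil.go) returns an iterator, so ranging over path segments no longer allocates the intermediate []string that strings.Split builds, and strings.Builder (in metadata/testutil) replaces repeated string concatenation, which reallocates on every +=. A combined sketch over illustrative inputs:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// Range over the iterator; no []string is materialized.
    	for name := range strings.SplitSeq("a/b/c", "/") {
    		fmt.Println(name)
    	}

    	// Builder amortizes the allocations of `ind += "  "` in a loop.
    	var ind strings.Builder
    	for range 3 {
    		ind.WriteString("  ")
    	}
    	fmt.Printf("%s+- leaf\n", ind.String())
    }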
@@ -44,7 +44,7 @@ func NewTTLCache(ttl time.Duration) *TTLCache { // Get retrieves the specified object from the cache and increments the reference counter of the // target content. Client must call `done` callback to decrease the reference count when the value // will no longer be used. -func (c *TTLCache) Get(key string) (value interface{}, done func(bool), ok bool) { +func (c *TTLCache) Get(key string) (value any, done func(bool), ok bool) { c.mu.Lock() defer c.mu.Unlock() rc, ok := c.m[key] @@ -59,7 +59,7 @@ func (c *TTLCache) Get(key string) (value interface{}, done func(bool), ok bool) // If the specified content already exists in the cache, this sets `added` to false and returns // "already cached" content (i.e. doesn't replace the content with the new one). Client must call // `done` callback to decrease the counter when the value will no longer be used. -func (c *TTLCache) Add(key string, value interface{}) (cachedValue interface{}, done func(bool), added bool) { +func (c *TTLCache) Add(key string, value any) (cachedValue any, done func(bool), added bool) { c.mu.Lock() defer c.mu.Unlock() if rc, ok := c.m[key]; ok { diff --git a/util/cacheutil/ttlcache_test.go b/util/cacheutil/ttlcache_test.go index 8a5d0494f..49bbcf5e0 100644 --- a/util/cacheutil/ttlcache_test.go +++ b/util/cacheutil/ttlcache_test.go @@ -66,7 +66,7 @@ func TestTTLGet(t *testing.T) { func TestTTLRemove(t *testing.T) { var evicted []string c := NewTTLCache(time.Hour) - c.OnEvicted = func(key string, value interface{}) { + c.OnEvicted = func(key string, value any) { evicted = append(evicted, key) } key1, value1 := "key1", "abcd1" @@ -96,7 +96,7 @@ func TestTTLRemove(t *testing.T) { func TestTTLRemoveOverwritten(t *testing.T) { var evicted []string c := NewTTLCache(3 * time.Second) - c.OnEvicted = func(key string, value interface{}) { + c.OnEvicted = func(key string, value any) { evicted = append(evicted, key) } key1, value1 := "key1", "abcd1" @@ -135,7 +135,7 @@ func TestTTLEviction(t *testing.T) { evictedMu sync.Mutex ) c := NewTTLCache(time.Second) - c.OnEvicted = func(key string, value interface{}) { + c.OnEvicted = func(key string, value any) { evictedMu.Lock() evicted = append(evicted, key) evictedMu.Unlock() @@ -182,7 +182,7 @@ func TestTTLEviction(t *testing.T) { func TestTTLQuickDone(t *testing.T) { var evicted []string c := NewTTLCache(time.Hour) - c.OnEvicted = func(key string, value interface{}) { + c.OnEvicted = func(key string, value any) { evicted = append(evicted, key) } key1, value1 := "key1", "abcd1"
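Finally, the wg.Add(1)/go func(){ defer wg.Done(); ... }() boilerplate in fs/metrics/layer/metrics.go and store/manager.go collapses into sync.WaitGroup.Go (Go 1.25), which increments the counter, runs the function in a new goroutine, and marks it done when the function returns. A minimal sketch with placeholder work:

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	var wg sync.WaitGroup
    	for _, layer := range []string{"sha256:aaaa", "sha256:bbbb"} {
    		wg.Go(func() {
    			// Placeholder for the per-layer resolution work.
    			fmt.Println("resolved", layer)
    		})
    	}
    	wg.Wait()
    }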