6 changes: 3 additions & 3 deletions analyzer/analyzer.go
@@ -157,7 +157,7 @@ func Analyze(ctx context.Context, client *containerd.Client, ref string, opts ..
 
     // Create the container and the task
     var container containerd.Container
-    for i := 0; i < 3; i++ {
+    for range 3 {
        id := xid.New().String()
        var s runtimespec.Spec
        container, err = client.NewContainer(ctx, id,
@@ -221,8 +221,8 @@ func Analyze(ctx context.Context, client *containerd.Client, ref string, opts ..
    prePaths := preMonitor.GetPaths()
    for _, path := range prePaths {
        cleanPath := path
-       if strings.HasPrefix(path, target) {
-           cleanPath = strings.TrimPrefix(path, target)
+       if after, ok := strings.CutPrefix(path, target); ok {
+           cleanPath = after
        }
        if err := rc.Record(cleanPath); err != nil {
            log.G(ctx).WithError(err).Debugf("failed to record pre-container path %q", cleanPath)
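Two idioms appear in this hunk: Go 1.22 allows ranging over an integer when the counter is unused, and strings.CutPrefix (Go 1.20) folds a HasPrefix/TrimPrefix pair into one call. A minimal standalone sketch of the pattern (the target prefix value here is hypothetical, for illustration only):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // for range N (Go 1.22+): iterate N times without declaring a counter.
    for range 3 {
        fmt.Println("attempt")
    }

    // CutPrefix (Go 1.20+) reports whether the prefix was present and
    // returns the remainder, replacing a HasPrefix+TrimPrefix pair.
    target := "/rootfs" // hypothetical prefix
    path := "/rootfs/etc/hosts"
    cleanPath := path
    if after, ok := strings.CutPrefix(path, target); ok {
        cleanPath = after
    }
    fmt.Println(cleanPath) // /etc/hosts
}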
1 change: 0 additions & 1 deletion analyzer/recorder/recorder.go
@@ -80,7 +80,6 @@ func imageRecorderFromManifest(ctx context.Context, cs content.Store, manifestDe
    var eg errgroup.Group
    filesMap := make([]map[string]struct{}, len(manifest.Layers))
    for i, desc := range manifest.Layers {
-       i, desc := i, desc
        filesMap[i] = make(map[string]struct{})
 
        // Create the index from the layer blob.
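The dropped i, desc := i, desc here (and the similar shadowing lines removed throughout this PR) rely on Go 1.22's per-iteration loop variable semantics: each iteration now gets fresh copies of the loop variables, so closures and goroutines no longer capture a single shared variable. A small sketch of why the re-declaration used to be necessary:

package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    for i, s := range []string{"a", "b", "c"} {
        // Before Go 1.22, i and s were shared across iterations, so each
        // goroutine had to shadow them with `i, s := i, s` to capture a
        // stable copy. Since Go 1.22 every iteration gets fresh variables,
        // making the shadowing redundant.
        wg.Add(1)
        go func() {
            defer wg.Done()
            fmt.Println(i, s)
        }()
    }
    wg.Wait()
}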
1 change: 0 additions & 1 deletion analyzer/recorder/recorder_test.go
@@ -217,7 +217,6 @@ func TestNodeIndex(t *testing.T) {
    ctx := context.Background()
    for _, tt := range tests {
        for _, prefix := range allowedPrefix {
-           prefix := prefix
            for mediatype, cWrapper := range compressWrappers {
                t.Run(tt.name+":"+mediatype+",prefix="+prefix, func(t *testing.T) {
                    var layers []ocispec.Descriptor
6 changes: 3 additions & 3 deletions cache/cache.go
@@ -134,7 +134,7 @@ func NewDirectoryCache(directory string, config DirectoryCacheConfig) (BlobCache
    bufPool := config.BufPool
    if bufPool == nil {
        bufPool = &sync.Pool{
-           New: func() interface{} {
+           New: func() any {
                return new(bytes.Buffer)
            },
        }
@@ -146,7 +146,7 @@ func NewDirectoryCache(directory string, config DirectoryCacheConfig) (BlobCache
            maxEntry = defaultMaxLRUCacheEntry
        }
        dataCache = cacheutil.NewLRUCache(maxEntry)
-       dataCache.OnEvicted = func(key string, value interface{}) {
+       dataCache.OnEvicted = func(key string, value any) {
            value.(*bytes.Buffer).Reset()
            bufPool.Put(value)
        }
@@ -158,7 +158,7 @@ func NewDirectoryCache(directory string, config DirectoryCacheConfig) (BlobCache
            maxEntry = defaultMaxCacheFds
        }
        fdCache = cacheutil.NewLRUCache(maxEntry)
-       fdCache.OnEvicted = func(key string, value interface{}) {
+       fdCache.OnEvicted = func(key string, value any) {
            value.(*os.File).Close()
        }
    }
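any has been a predeclared alias for interface{} since Go 1.18, so these signature changes are purely cosmetic: the OnEvicted callbacks are type-identical before and after. A sketch showing the equivalence (the callback type name below is hypothetical, not the cacheutil API):

package main

import "fmt"

// evictFunc mirrors the shape of an OnEvicted callback; the name is
// illustrative and only demonstrates that the two spellings are one type.
type evictFunc = func(key string, value any)

func main() {
    // Assigns fine: `any` and `interface{}` are the same type.
    var f evictFunc = func(key string, value interface{}) {
        fmt.Printf("evicted %s: %v\n", key, value)
    }
    f("k", 42)
}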
7 changes: 2 additions & 5 deletions cmd/containerd-stargz-grpc/db/reader.go
@@ -221,7 +221,7 @@ func (r *reader) init(decompressedR io.Reader, rOpts metadata.Options) (retErr e
 
    // Initialize root node
    var ok bool
-   for i := 0; i < 100; i++ {
+   for range 100 {
        fsID := xid.New().String()
        if err := r.initRootNode(fsID); err != nil {
            if errors.Is(err, errbolt.ErrBucketExists) {
@@ -930,10 +930,7 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
    }
 
    compressedBytesRemain := fr.nextOffset - ent.offset
-   bufSize := int(2 << 20)
-   if bufSize > int(compressedBytesRemain) {
-       bufSize = int(compressedBytesRemain)
-   }
+   bufSize := min(int(2<<20), int(compressedBytesRemain))
 
    br := bufio.NewReaderSize(io.NewSectionReader(fr.r.sr, ent.offset, compressedBytesRemain), bufSize)
    if _, err := br.Peek(bufSize); err != nil {
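min and max became builtins in Go 1.21, collapsing these clamp-style if blocks into one expression. A standalone sketch of the buffer-size clamp above:

package main

import "fmt"

func main() {
    const maxBuf = 2 << 20 // 2 MiB cap, as in the hunk above
    compressedBytesRemain := int64(4096)

    // The builtin min (Go 1.21+) replaces the manual compare-and-assign.
    bufSize := min(maxBuf, int(compressedBytesRemain))
    fmt.Println(bufSize) // 4096
}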
2 changes: 1 addition & 1 deletion cmd/containerd-stargz-grpc/ipfs/resolvehandler.go
@@ -74,6 +74,6 @@ func (f *fetcher) Check() error {
 }
 
 func (f *fetcher) GenID(off int64, size int64) string {
-   sum := sha256.Sum256([]byte(fmt.Sprintf("%s-%d-%d", f.cid, off, size)))
+   sum := sha256.Sum256(fmt.Appendf(nil, "%s-%d-%d", f.cid, off, size))
    return fmt.Sprintf("%x", sum)
 }
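fmt.Appendf (Go 1.19) formats directly into a byte slice, avoiding the intermediate string plus []byte conversion that []byte(fmt.Sprintf(...)) incurs. A minimal sketch of the GenID change with placeholder values:

package main

import (
    "crypto/sha256"
    "fmt"
)

func main() {
    cid, off, size := "bafy...", int64(0), int64(512) // placeholder values

    // Appendf writes the formatted bytes straight into a (here nil) slice,
    // skipping the string allocation Sprintf would make.
    sum := sha256.Sum256(fmt.Appendf(nil, "%s-%d-%d", cid, off, size))
    fmt.Printf("%x\n", sum)
}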
5 changes: 2 additions & 3 deletions cmd/stargz-store/main.go
@@ -25,6 +25,7 @@ import (
    "fmt"
    "io"
    golog "log"
+   "maps"
    "math/rand"
    "net"
    "os"
@@ -250,9 +251,7 @@ func (sk *storeKeychain) add(data []byte) error {
    if sk.config == nil {
        sk.config = make(map[string]authConfig)
    }
-   for k, c := range conf {
-       sk.config[k] = c
-   }
+   maps.Copy(sk.config, conf)
    sk.configMu.Unlock()
    return nil
 }
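maps.Copy (Go 1.21) replaces the manual key-by-key loop: it copies all key/value pairs from src into dst, overwriting keys that already exist. A sketch with a simplified stand-in for authConfig:

package main

import (
    "fmt"
    "maps"
)

func main() {
    // A plain string stands in for authConfig to keep the sketch self-contained.
    dst := map[string]string{"registry-1": "tokenA"}
    src := map[string]string{"registry-1": "tokenB", "registry-2": "tokenC"}

    // Equivalent to: for k, v := range src { dst[k] = v }
    maps.Copy(dst, src)
    fmt.Println(dst) // map[registry-1:tokenB registry-2:tokenC]
}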
1 change: 0 additions & 1 deletion estargz/build.go
@@ -238,7 +238,6 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
    var mu sync.Mutex
    var eg errgroup.Group
    for i, parts := range tarParts {
-       i, parts := i, parts
        // builds verifiable stargz sub-blobs
        eg.Go(func() error {
            esgzFile, err := layerFiles.TempFile("", "esgzdata")
3 changes: 0 additions & 3 deletions estargz/build_test.go
@@ -354,11 +354,8 @@ func TestSort(t *testing.T) {
    }
    for _, tt := range tests {
        for _, srcCompression := range srcCompressions {
-           srcCompression := srcCompression
            for _, logprefix := range allowedPrefix {
-               logprefix := logprefix
                for _, tarprefix := range allowedPrefix {
-                   tarprefix := tarprefix
                    t.Run(fmt.Sprintf("%s-logprefix=%q-tarprefix=%q-src=%d", tt.name, logprefix, tarprefix, srcCompression), func(t *testing.T) {
                        // Sort tar file
                        var pfiles []string
5 changes: 1 addition & 4 deletions estargz/estargz_test.go
@@ -101,10 +101,7 @@ func regularFileReader(name string, size int64, chunkSize int64) (*TOCEntry, *Re
    var written int64
    for written < size {
        remain := size - written
-       cs := chunkSize
-       if remain < cs {
-           cs = remain
-       }
+       cs := min(remain, chunkSize)
        ent.ChunkSize = cs
        ent.ChunkOffset = written
        chunks = append(chunks, ent)
2 changes: 1 addition & 1 deletion estargz/gzip_test.go
@@ -121,7 +121,7 @@ func checkLegacyFooter(t *testing.T, off int64) {
 func legacyFooterBytes(tocOff int64) []byte {
    buf := bytes.NewBuffer(make([]byte, 0, legacyFooterSize))
    gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression)
-   gz.Extra = []byte(fmt.Sprintf("%016xSTARGZ", tocOff))
+   gz.Extra = fmt.Appendf(nil, "%016xSTARGZ", tocOff)
    gz.Close()
    if buf.Len() != legacyFooterSize {
        panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), legacyFooterSize))
21 changes: 4 additions & 17 deletions estargz/testutil.go
@@ -187,15 +187,10 @@ func testBuild(t *TestRunner, controllers ...TestingControllerFactory) {
        tt.minChunkSize = []int{0}
    }
    for _, srcCompression := range srcCompressions {
-       srcCompression := srcCompression
        for _, newCL := range controllers {
-           newCL := newCL
            for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
-               srcTarFormat := srcTarFormat
                for _, prefix := range allowedPrefix {
-                   prefix := prefix
                    for _, minChunkSize := range tt.minChunkSize {
-                       minChunkSize := minChunkSize
                        t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *TestRunner) {
                            tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
                            // Test divideEntries()
@@ -675,15 +670,10 @@ func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory)
        tt.minChunkSize = []int{0}
    }
    for _, srcCompression := range srcCompressions {
-       srcCompression := srcCompression
        for _, newCL := range controllers {
-           newCL := newCL
            for _, prefix := range allowedPrefix {
-               prefix := prefix
                for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
-                   srcTarFormat := srcTarFormat
                    for _, minChunkSize := range tt.minChunkSize {
-                       minChunkSize := minChunkSize
                        t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *TestRunner) {
                            // Get original tar file and chunk digests
                            dgstMap := make(map[string]digest.Digest)
@@ -1488,11 +1478,8 @@ func testWriteAndOpen(t *TestRunner, controllers ...TestingControllerFactory) {
 
    for _, tt := range tests {
        for _, newCL := range controllers {
-           newCL := newCL
            for _, prefix := range allowedPrefix {
-               prefix := prefix
                for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
-                   srcTarFormat := srcTarFormat
                    for _, lossless := range []bool{true, false} {
                        t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *TestRunner) {
                            var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
@@ -2072,7 +2059,7 @@ func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format
    return f(tw, prefix, format)
 }
 
-func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
+func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...any) *io.SectionReader {
    format := tar.FormatUnknown
    for _, opt := range opts {
        switch v := opt.(type) {
@@ -2096,7 +2083,7 @@ func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...interface{}) *
    return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
 }
 
-func dir(name string, opts ...interface{}) tarEntry {
+func dir(name string, opts ...any) tarEntry {
    return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
        var o owner
        mode := os.FileMode(0755)
@@ -2137,7 +2124,7 @@ type owner struct {
    gid int
 }
 
-func file(name, contents string, opts ...interface{}) tarEntry {
+func file(name, contents string, opts ...any) tarEntry {
    return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
        var xattrs xAttr
        var o owner
@@ -2349,7 +2336,7 @@ func (f fileInfoOnlyMode) Size() int64 { return 0 }
 func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) }
 func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
 func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() }
-func (f fileInfoOnlyMode) Sys() interface{} { return nil }
+func (f fileInfoOnlyMode) Sys() any { return nil }
 
 func CheckGzipHasStreams(t TestingT, b []byte, streams []int64) {
    if len(streams) == 0 {
2 changes: 1 addition & 1 deletion estargz/types.go
@@ -246,7 +246,7 @@ func (fi fileInfo) Name() string { return path.Base(fi.e.Name) }
 func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" }
 func (fi fileInfo) Size() int64 { return fi.e.Size }
 func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
-func (fi fileInfo) Sys() interface{} { return fi.e }
+func (fi fileInfo) Sys() any { return fi.e }
 func (fi fileInfo) Mode() (m os.FileMode) {
    // TOCEntry.Mode is tar.Header.Mode so we can understand the these bits using `tar` pkg.
    m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
8 changes: 3 additions & 5 deletions estargz/zstdchunked/zstdchunked_test.go
@@ -21,7 +21,7 @@ import (
    "crypto/sha256"
    "fmt"
    "io"
-   "sort"
+   "slices"
    "testing"
 
    "github.com/containerd/stargz-snapshotter/estargz"
@@ -79,9 +79,7 @@ func (zc *zstdController) TestStreams(t estargz.TestingT, b []byte, streams []in
 
    // We expect the last offset is footer offset.
    // 8 is the size of the zstd skippable frame header + the frame size (see WriteTOCAndFooter)
-   sort.Slice(streams, func(i, j int) bool {
-       return streams[i] < streams[j]
-   })
+   slices.Sort(streams)
    streams[len(streams)-1] = streams[len(streams)-1] - 8
    wants := map[int64]struct{}{}
    for _, s := range streams {
@@ -127,7 +125,7 @@ func (zc *zstdController) TestStreams(t estargz.TestingT, b []byte, streams []in
 }
 
 func nextIndex(s1, sub []byte) int {
-   for i := 0; i < len(s1); i++ {
+   for i := range s1 {
        if len(s1)-i < len(sub) {
            return -1
        } else if bytes.Equal(s1[i:i+len(sub)], sub) {
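slices.Sort (Go 1.21) sorts a slice of any ordered element type in ascending order without the comparison closure sort.Slice needs, and avoids that API's reflection overhead. A sketch of the offsets sort above, with example values:

package main

import (
    "fmt"
    "slices"
)

func main() {
    streams := []int64{900, 100, 500} // example stream offsets

    // Replaces:
    //   sort.Slice(streams, func(i, j int) bool { return streams[i] < streams[j] })
    slices.Sort(streams)
    fmt.Println(streams) // [100 500 900]
}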
3 changes: 1 addition & 2 deletions fs/fs.go
@@ -262,7 +262,6 @@ func (fs *filesystem) Mount(ctx context.Context, mountpoint string, labels map[s
    // Also resolve and cache other layers in parallel
    preResolve := src[0] // TODO: should we pre-resolve blobs in other sources as well?
    for _, desc := range neighboringLayers(preResolve.Manifest, preResolve.Target) {
-       desc := desc
        go func() {
            // Avoids to get canceled by client.
            ctx := log.WithLogger(context.Background(), log.G(ctx).WithField("mountpoint", mountpoint))
@@ -416,7 +415,7 @@ func (fs *filesystem) check(ctx context.Context, l layer.Layer, labels map[strin
        retrynum = 1
        rErr     = fmt.Errorf("failed to refresh connection")
    )
-   for retry := 0; retry < retrynum; retry++ {
+   for retry := range retrynum {
        log.G(ctx).Warnf("refreshing(%d)...", retry)
        for _, s := range src {
            err := l.Refresh(ctx, s.Hosts, s.Name, s.Target)
10 changes: 5 additions & 5 deletions fs/layer/layer.go
@@ -158,7 +158,7 @@ func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager,
    // the filesystem resolves and caches all layers in an image (not only queried one) in parallel,
    // before they are actually queried.
    layerCache := cacheutil.NewTTLCache(resolveResultEntryTTL)
-   layerCache.OnEvicted = func(key string, value interface{}) {
+   layerCache.OnEvicted = func(key string, value any) {
        if err := value.(*layer).close(); err != nil {
            log.L.WithField("key", key).WithError(err).Warnf("failed to clean up layer")
            return
@@ -169,7 +169,7 @@ func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager,
    // blobCache caches resolved blobs for futural use. This is especially useful when a layer
    // isn't eStargz/stargz (the *layer object won't be created/cached in this case).
    blobCache := cacheutil.NewTTLCache(resolveResultEntryTTL)
-   blobCache.OnEvicted = func(key string, value interface{}) {
+   blobCache.OnEvicted = func(key string, value any) {
        if err := value.(remote.Blob).Close(); err != nil {
            log.L.WithField("key", key).WithError(err).Warnf("failed to clean up blob")
            return
@@ -212,16 +212,16 @@ func newCache(root string, cacheType string, cfg config.Config) (cache.BlobCache
    }
 
    bufPool := &sync.Pool{
-       New: func() interface{} {
+       New: func() any {
            return new(bytes.Buffer)
        },
    }
    dCache, fCache := cacheutil.NewLRUCache(maxDataEntry), cacheutil.NewLRUCache(maxFdEntry)
-   dCache.OnEvicted = func(key string, value interface{}) {
+   dCache.OnEvicted = func(key string, value any) {
        value.(*bytes.Buffer).Reset()
        bufPool.Put(value)
    }
-   fCache.OnEvicted = func(key string, value interface{}) {
+   fCache.OnEvicted = func(key string, value any) {
        value.(*os.File).Close()
    }
    // create a cache on an unique directory
9 changes: 3 additions & 6 deletions fs/layer/testutil.go
@@ -252,10 +252,7 @@ func testPrefetch(t *TestRunner, factory metadata.Store, lc layerConfig) {
    if tt.chunkSize > 0 {
        chunkSize = tt.chunkSize
    }
-   minChunkSize := 0
-   if tt.minChunkSize > 0 {
-       minChunkSize = tt.minChunkSize
-   }
+   minChunkSize := max(tt.minChunkSize, 0)
    sr, dgst, err := tutil.BuildEStargz(tt.in,
        tutil.WithEStargzOptions(
            estargz.WithChunkSize(chunkSize),
@@ -1127,7 +1124,7 @@ func getDirentAndNode(t TestingT, root *node, path string) (ent fuse.DirEntry, n
    // get the target's parent directory.
    var eo fuse.EntryOut
    d := root
-   for _, name := range strings.Split(dir, "/") {
+   for name := range strings.SplitSeq(dir, "/") {
        if len(name) == 0 {
            continue
        }
@@ -1169,7 +1166,7 @@ func getDirent(t TestingT, root *node, path string) (ent fuse.DirEntry, err erro
    // get the target's parent directory.
    var eo fuse.EntryOut
    d := root
-   for _, name := range strings.Split(dir, "/") {
+   for name := range strings.SplitSeq(dir, "/") {
        if len(name) == 0 {
            continue
        }
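strings.SplitSeq (Go 1.24) returns an iterator over the substrings instead of allocating the whole slice that strings.Split builds, which suits loops that visit each element once; max (Go 1.21) is the builtin counterpart to the min used earlier. A sketch of both, with an example path:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // SplitSeq yields each path component lazily; no []string is allocated.
    for name := range strings.SplitSeq("usr/local/bin", "/") {
        if len(name) == 0 {
            continue
        }
        fmt.Println(name)
    }

    // The builtin max clamps a possibly-negative value to zero, replacing
    // the if-block the testPrefetch hunk above removed.
    minChunkSize := max(-4, 0)
    fmt.Println(minChunkSize) // 0
}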
7 changes: 2 additions & 5 deletions fs/metrics/layer/metrics.go
@@ -55,14 +55,11 @@ func (c *Controller) Collect(ch chan<- prometheus.Metric) {
    c.layerMu.RLock()
    wg := &sync.WaitGroup{}
    for mp, l := range c.layer {
-       mp, l := mp, l
-       wg.Add(1)
-       go func() {
-           defer wg.Done()
+       wg.Go(func() {
            for _, e := range c.metrics {
                e.collect(mp, l, c.ns, ch)
            }
-       }()
+       })
    }
    c.layerMu.RUnlock()
    wg.Wait()
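sync.WaitGroup.Go (Go 1.25) wraps the Add(1) / go / defer Done() boilerplate: it increments the counter, runs the function in a new goroutine, and marks it done when the function returns. A sketch of the Collect change with illustrative data (the loop-variable capture also relies on Go 1.22 semantics, as noted earlier):

package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    layers := map[string]string{"/mnt/a": "layer-a", "/mnt/b": "layer-b"} // illustrative data

    for mp, l := range layers {
        // Equivalent to: wg.Add(1); go func() { defer wg.Done(); ... }()
        wg.Go(func() {
            fmt.Println(mp, l)
        })
    }
    wg.Wait()
}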