@@ -126,21 +126,28 @@ type GSUtilRemoteCache struct {
 func (rs GSUtilRemoteCache) ExistingPackages(pkgs []*Package) (map[*Package]struct{}, error) {
     fmt.Printf("☁️ checking remote cache for past build artifacts for %d packages\n", len(pkgs))
 
-    packageToURLMap := make(map[*Package]string)
+    // Map to store both .tar.gz and .tar URLs for each package
+    type urlPair struct {
+        gzURL  string
+        tarURL string
+    }
+    packageToURLMap := make(map[*Package]urlPair)
+
+    // Create a list of all possible URLs
+    var urls []string
     for _, p := range pkgs {
         version, err := p.Version()
         if err != nil {
             log.WithField("package", p.FullName()).Debug("Failed to get version for package. Will not check remote cache for package.")
             continue
         }
 
-        url := fmt.Sprintf("gs://%s/%s", rs.BucketName, fmt.Sprintf("%s.tar.gz", version))
-        packageToURLMap[p] = url
-    }
-
-    urls := make([]string, 0, len(packageToURLMap))
-    for _, url := range packageToURLMap {
-        urls = append(urls, url)
+        pair := urlPair{
+            gzURL:  fmt.Sprintf("gs://%s/%s.tar.gz", rs.BucketName, version),
+            tarURL: fmt.Sprintf("gs://%s/%s.tar", rs.BucketName, version),
+        }
+        packageToURLMap[p] = pair
+        urls = append(urls, pair.gzURL, pair.tarURL)
     }
 
     if len(urls) == 0 {
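
For context on the hunk above: each package now yields two candidate cache locations, with the gzipped artifact preferred. A minimal, self-contained sketch of the URL scheme; the bucket name and version below are illustrative, not values from this change:

package main

import "fmt"

// urlPair mirrors the struct introduced in the hunk above.
type urlPair struct {
    gzURL  string
    tarURL string
}

func main() {
    // Illustrative values; the real code uses rs.BucketName and p.Version().
    bucket, version := "example-bucket", "abc123"

    pair := urlPair{
        gzURL:  fmt.Sprintf("gs://%s/%s.tar.gz", bucket, version),
        tarURL: fmt.Sprintf("gs://%s/%s.tar", bucket, version),
    }
    fmt.Println(pair.gzURL)  // gs://example-bucket/abc123.tar.gz
    fmt.Println(pair.tarURL) // gs://example-bucket/abc123.tar
}
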
@@ -163,16 +170,29 @@ func (rs GSUtilRemoteCache) ExistingPackages(pkgs []*Package) (map[*Package]struct{}, error) {
 
     existingURLs := parseGSUtilStatOutput(bytes.NewReader(stdoutBuffer.Bytes()))
     existingPackages := make(map[*Package]struct{})
+
     for _, p := range pkgs {
-        url := packageToURLMap[p]
-        if _, exists := existingURLs[url]; exists {
+        urls := packageToURLMap[p]
+        if _, exists := existingURLs[urls.gzURL]; exists {
+            existingPackages[p] = struct{}{}
+            continue
+        }
+        if _, exists := existingURLs[urls.tarURL]; exists {
             existingPackages[p] = struct{}{}
         }
     }
 
     return existingPackages, nil
 }
 
+// Helper function to get all possible artifact URLs for a package
+func getPackageArtifactURLs(bucketName, version string) []string {
+    return []string{
+        fmt.Sprintf("gs://%s/%s.tar.gz", bucketName, version),
+        fmt.Sprintf("gs://%s/%s.tar", bucketName, version),
+    }
+}
+
 // Download makes a best-effort attempt at downloading previously cached build artifacts
 func (rs GSUtilRemoteCache) Download(dst Cache, pkgs []*Package) error {
     fmt.Printf("☁️ downloading %d cached build artifacts\n", len(pkgs))
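
One observation on this hunk: getPackageArtifactURLs is introduced but nothing in the diff calls it yet; ExistingPackages builds the same strings inline. If the helper is meant to be the single source of truth for the URL scheme, the version-gathering loop in the first hunk could use it along these lines (a sketch against this diff's own identifiers, not part of the change):

// Inside the loop over pkgs, after version has been resolved:
candidates := getPackageArtifactURLs(rs.BucketName, version)
packageToURLMap[p] = urlPair{gzURL: candidates[0], tarURL: candidates[1]}
urls = append(urls, candidates...)
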
@@ -305,15 +325,23 @@ func NewS3RemoteCache(bucketName string, cfg *aws.Config) (*S3RemoteCache, error) {
 
 // ExistingPackages returns existing cached build artifacts in the remote cache
 func (rs *S3RemoteCache) ExistingPackages(pkgs []*Package) (map[*Package]struct{}, error) {
-    packagesToKeys := make(map[*Package]string)
+    type keyPair struct {
+        gzKey  string
+        tarKey string
+    }
+
+    packagesToKeys := make(map[*Package]keyPair)
     for _, p := range pkgs {
         version, err := p.Version()
         if err != nil {
             log.WithField("package", p.FullName()).Debug("Failed to get version for package. Will not check remote cache for package.")
             continue
         }
 
-        packagesToKeys[p] = filepath.Base(fmt.Sprintf("%s.tar.gz", version))
+        packagesToKeys[p] = keyPair{
+            gzKey:  filepath.Base(fmt.Sprintf("%s.tar.gz", version)),
+            tarKey: filepath.Base(fmt.Sprintf("%s.tar", version)),
+        }
     }
 
     if len(packagesToKeys) == 0 {
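
Note that unlike the gsutil variant, the S3 keys are bare object names with no bucket prefix. The filepath.Base call is carried over from the old code; for an ordinary version string it is a no-op and only matters if a version ever contained a path separator:

filepath.Base("abc123.tar.gz")     // "abc123.tar.gz" (unchanged)
filepath.Base("dir/abc123.tar.gz") // "abc123.tar.gz" (directory stripped)
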
@@ -322,36 +350,56 @@ func (rs *S3RemoteCache) ExistingPackages(pkgs []*Package) (map[*Package]struct{}, error) {
     log.Debugf("Checking if %d packages exist in the remote cache using s3", len(packagesToKeys))
 
-    ch := make(chan *Package, len(packagesToKeys))
-
     existingPackages := make(map[*Package]struct{})
+    var mu sync.Mutex // Mutex for safe concurrent map access
     wg := sync.WaitGroup{}
 
     ctx := context.TODO()
-    for pkg, key := range packagesToKeys {
+    for pkg, keys := range packagesToKeys {
         wg.Add(1)
-        go func(pkg *Package, key string) {
+        go func(pkg *Package, keys keyPair) {
             defer wg.Done()
 
-            stat, err := rs.hasObject(ctx, key)
-            if err != nil {
-                log.WithField("bucket", rs.BucketName).WithField("key", key).Debugf("Failed to check for remote cached object: %s", err)
+            // Check for .tar.gz first
+            if stat, err := rs.hasObject(ctx, keys.gzKey); err != nil {
+                log.WithField("bucket", rs.BucketName).WithField("key", keys.gzKey).
+                    Debugf("Failed to check for remote cached object: %s", err)
+            } else if stat {
+                mu.Lock()
+                existingPackages[pkg] = struct{}{}
+                mu.Unlock()
+                return
             }
-            if stat {
-                ch <- pkg
+
+            // If .tar.gz doesn't exist, check for .tar
+            if stat, err := rs.hasObject(ctx, keys.tarKey); err != nil {
+                log.WithField("bucket", rs.BucketName).WithField("key", keys.tarKey).
+                    Debugf("Failed to check for remote cached object: %s", err)
+            } else if stat {
+                mu.Lock()
+                existingPackages[pkg] = struct{}{}
+                mu.Unlock()
             }
-        }(pkg, key)
+        }(pkg, keys)
     }
     wg.Wait()
-    close(ch)
 
-    for p := range ch {
-        existingPackages[p] = struct{}{}
-    }
-    log.WithField("bucket", rs.BucketName).Debugf("%d/%d packages found in remote cache", len(existingPackages), len(packagesToKeys))
+    log.WithField("bucket", rs.BucketName).
+        Debugf("%d/%d packages found in remote cache", len(existingPackages), len(packagesToKeys))
 
     return existingPackages, nil
 }
 
+// Helper method to consolidate object existence check logic
+func (rs *S3RemoteCache) checkObjectExists(ctx context.Context, key string) bool {
+    stat, err := rs.hasObject(ctx, key)
+    if err != nil {
+        log.WithField("bucket", rs.BucketName).WithField("key", key).
+            Debugf("Failed to check for remote cached object: %s", err)
+        return false
+    }
+    return stat
+}
+
 // Download makes a best-effort attempt at downloading previously cached build artifacts for the given packages
 // in their current version. A cache miss (i.e. a build artifact not being available) does not constitute an
 // error. Get should try and download as many artifacts as possible.
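
As with getPackageArtifactURLs earlier, checkObjectExists is defined but not yet called; the goroutine body above duplicates its error handling. If the helper were adopted, the goroutine could shrink to roughly the following sketch (same identifiers as the diff, not part of the change):

go func(pkg *Package, keys keyPair) {
    defer wg.Done()

    // || short-circuits, so the .tar key is only consulted when the
    // .tar.gz key is absent, preserving the preference order above.
    if rs.checkObjectExists(ctx, keys.gzKey) || rs.checkObjectExists(ctx, keys.tarKey) {
        mu.Lock()
        existingPackages[pkg] = struct{}{}
        mu.Unlock()
    }
}(pkg, keys)

Swapping the result channel for a mutex-guarded map write is a sound simplification: each goroutine performs at most one insert, and wg.Wait() provides the happens-before edge for the final read of existingPackages.
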