@@ -16,6 +16,7 @@ import (
 
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/aws/aws-sdk-go-v2/service/sts"
@@ -250,10 +251,10 @@ type S3RemoteCache struct {
 func NewS3RemoteCache(bucketName string, cfg *aws.Config) (*S3RemoteCache, error) {
 	if cfg == nil {
 		v, err := config.LoadDefaultConfig(context.TODO())
-		cfg = &v
 		if err != nil {
 			return nil, fmt.Errorf("cannot load s3 config: %s", err)
 		}
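+		// Take the address of v only after LoadDefaultConfig has succeeded;
+		// on error v is a zero-value config we must not use.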
+		cfg = &v
 	}
 	s3Client := s3.NewFromConfig(*cfg)
 
@@ -287,7 +288,7 @@ func (rs *S3RemoteCache) ExistingPackages(pkgs []*Package) (map[*Package]struct{
 			continue
 		}
 
-		packagesToKeys[p] = fmt.Sprintf("%s.tar.gz", version)
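+		// filepath.Base strips any path separators a version string might
+		// contain, so the cache key is always a bare object name.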
+		packagesToKeys[p] = filepath.Base(fmt.Sprintf("%s.tar.gz", version))
 	}
 
 	if len(packagesToKeys) == 0 {
@@ -296,12 +297,12 @@ func (rs *S3RemoteCache) ExistingPackages(pkgs []*Package) (map[*Package]struct{
 	log.Debugf("Checking if %d packages exist in the remote cache using s3", len(packagesToKeys))
 
 	ch := make(chan *Package, len(packagesToKeys))
-	defer close(ch)
 
 	existingPackages := make(map[*Package]struct{})
 	wg := sync.WaitGroup{}
 
 	for pkg, key := range packagesToKeys {
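+		// Register with the WaitGroup before spawning the goroutine;
+		// adding after "go" races with wg.Wait() below.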
+		wg.Add(1)
 		go func(pkg *Package, key string) {
 			defer wg.Done()
 
@@ -311,25 +312,108 @@ func (rs *S3RemoteCache) ExistingPackages(pkgs []*Package) (map[*Package]struct{
 				ch <- pkg
 			}
 		}(pkg, key)
-
-		wg.Add(1)
 	}
 	wg.Wait()
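+	// All senders have finished; close ch so the collection loop below terminates.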
+	close(ch)
+
+	for p := range ch {
+		existingPackages[p] = struct{}{}
+	}
 
 	return existingPackages, nil
 }
 
 // Download makes a best-effort attempt at downloading previously cached build artifacts for the given packages
 // in their current version. A cache miss (i.e. a build artifact not being available) does not constitute an
 // error. Download should try to fetch as many artifacts as possible.
-func (s3 *S3RemoteCache) Download(dst Cache, pkgs []*Package) error {
-	panic("not implemented") // TODO: Implement
+func (rs *S3RemoteCache) Download(dst Cache, pkgs []*Package) error {
+	fmt.Printf("☁️ downloading %d cached build artifacts from s3 remote cache\n", len(pkgs))
+	var (
+		files []string
+		dest  string
+	)
+
+	for _, pkg := range pkgs {
+		fn, exists := dst.Location(pkg)
+		if exists {
+			continue
+		}
+
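+		// All artifacts must share a single destination directory; remember
+		// the first one seen and reject any package that maps elsewhere.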
+		if dest == "" {
+			dest = filepath.Dir(fn)
+		} else if dest != filepath.Dir(fn) {
+			return xerrors.Errorf("s3 cache only supports one target folder, not %s and %s", dest, filepath.Dir(fn))
+		}
+
+		files = append(files, fmt.Sprintf("%s/%s", rs.BucketName, strings.TrimLeft(fn, "/")))
+	}
+
+	wg := sync.WaitGroup{}
+
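+	// Fetch all objects concurrently; a failed download is logged and
+	// skipped rather than failing the whole best-effort operation.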
+	for _, file := range files {
+		wg.Add(1)
+
+		go func(file string) {
+			defer wg.Done()
+
+			key := filepath.Base(file)
+			target := fmt.Sprintf("%s/%s", dest, key)
+			fields := log.Fields{
+				"key":    key,
+				"bucket": rs.BucketName,
+				"region": rs.s3Config.Region,
+			}
+			log.WithFields(fields).Debug("downloading object from s3")
+			n, err := rs.getObject(context.TODO(), key, target)
+			if err != nil {
+				log.WithFields(fields).Warnf("failed to download and store object %s from s3: %s", key, err)
+			} else {
+				log.WithFields(fields).Debugf("downloaded %d byte object from s3 to %s", n, target)
+			}
+		}(file)
+	}
+	wg.Wait()
+
+	return nil
 }
 
 // Upload makes a best effort to upload the build artifacts to a remote cache. If uploading an artifact fails, that
 // does not constitute an error.
-func (s3 *S3RemoteCache) Upload(src Cache, pkgs []*Package) error {
-	panic("not implemented") // TODO: Implement
+func (rs *S3RemoteCache) Upload(src Cache, pkgs []*Package) error {
+	var files []string
+	for _, pkg := range pkgs {
+		file, exists := src.Location(pkg)
+		if !exists {
+			continue
+		}
+		files = append(files, file)
+	}
+	fmt.Fprintf(os.Stdout, "☁️ uploading %d build artifacts to s3 remote cache\n", len(files))
+
+	wg := sync.WaitGroup{}
+
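+	// Mirror Download: upload concurrently and treat individual failures
+	// as warnings only, in keeping with the best-effort contract.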
+	for _, file := range files {
+		wg.Add(1)
+		go func(file string) {
+			defer wg.Done()
+			key := filepath.Base(file)
+			fields := log.Fields{
+				"key":    key,
+				"bucket": rs.BucketName,
+				"region": rs.s3Config.Region,
+			}
+			log.WithFields(fields).Debug("uploading object to s3")
+			res, err := rs.uploadObject(context.TODO(), key, file)
+			if err != nil {
+				log.WithFields(fields).Warnf("failed to upload object to s3: %s", err)
+			} else {
+				log.WithFields(fields).Debugf("completed upload to %s", res.Location)
+			}
+		}(file)
+	}
+	wg.Wait()
+
+	return nil
 }
 
 func (rs *S3RemoteCache) hasBucket(ctx context.Context) (bool, error) {
@@ -356,11 +440,10 @@ func (rs *S3RemoteCache) hasBucket(ctx context.Context) (bool, error) {
 }
 
 func (rs *S3RemoteCache) hasObject(ctx context.Context, key string) (bool, error) {
-	cfg := *rs.s3Config
 	fields := log.Fields{
 		"key":    key,
 		"bucket": rs.BucketName,
-		"region": cfg.Region,
+		"region": rs.s3Config.Region,
 	}
 	log.WithFields(fields).Debugf("Checking s3 for cached package")
 
@@ -377,7 +460,10 @@ func (rs *S3RemoteCache) hasObject(ctx context.Context, key string) (bool, error
 	}
 
 	// We've received an error that's not a simple missing key error. Collect more information
-	_, _ = rs.hasBucket(ctx)
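+	// If the bucket itself is missing there is nothing key-specific to
+	// report; return the original error without the GetObject warning below.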
+	hasBucket, _ := rs.hasBucket(ctx)
+	if !hasBucket {
+		return false, err
+	}
 
 	log.WithFields(fields).Warnf("S3 GetObject failed: %s", err)
 	return false, err
@@ -386,3 +472,47 @@ func (rs *S3RemoteCache) hasObject(ctx context.Context, key string) (bool, error
 	// XXX
 	return true, nil
 }
+
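+// getObject fetches a single object via the s3 transfer manager and writes
+// it to path. Note it buffers the whole artifact in memory first: fine for
+// small build artifacts, costly for large ones.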
+func (rs *S3RemoteCache) getObject(ctx context.Context, key string, path string) (int64, error) {
+	var partMiBs int64 = 10
+	downloader := manager.NewDownloader(rs.s3Client, func(d *manager.Downloader) {
+		d.PartSize = partMiBs * 1024 * 1024
+	})
+	buffer := manager.NewWriteAtBuffer([]byte{})
+	res, err := downloader.Download(ctx, buffer, &s3.GetObjectInput{
+		Bucket: aws.String(rs.BucketName),
+		Key:    aws.String(key),
+	})
+	if err != nil {
+		return res, err
+	}
+
+	err = os.WriteFile(path, buffer.Bytes(), 0644)
+	if err != nil {
+		return 0, xerrors.Errorf("failed to write s3 download to %s: %s", path, err)
+	}
+	return res, nil
+}
+
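+// uploadObject streams the file at path to the bucket under key, using the
+// transfer manager's multipart upload with 10 MiB parts.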
+func (rs *S3RemoteCache) uploadObject(ctx context.Context, key string, path string) (*manager.UploadOutput, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, xerrors.Errorf("cannot open %s for S3 upload: %s", path, err)
+	}
+	defer f.Close()
+
+	var partMiBs int64 = 10
+	uploader := manager.NewUploader(rs.s3Client, func(u *manager.Uploader) {
+		u.PartSize = partMiBs * 1024 * 1024
+	})
+	res, err := uploader.Upload(ctx, &s3.PutObjectInput{
+		Bucket: aws.String(rs.BucketName),
+		Key:    aws.String(key),
+		Body:   f,
+	})
+	if err != nil {
+		return res, err
+	}
+	return res, nil
+}