diff --git a/.doc_gen/metadata/s3_metadata.yaml b/.doc_gen/metadata/s3_metadata.yaml index ec065404228..107bb09b428 100644 --- a/.doc_gen/metadata/s3_metadata.yaml +++ b/.doc_gen/metadata/s3_metadata.yaml @@ -2598,14 +2598,16 @@ s3_Scenario_UsingLargeFiles: - sdk_version: 2 github: gov2/s3 excerpts: - - description: Upload a large object by using an upload manager to break the data into parts and upload them concurrently. + - description: Create functions that use upload and download managers to break the data into parts and + transfer them concurrently. snippet_tags: - gov2.s3.BucketBasics.struct - gov2.s3.Upload - - description: Download a large object by using a download manager to get the data in parts and download them - concurrently. - snippet_tags: - gov2.s3.Download + - description: Run an interactive scenario that shows you how to use the upload and download + managers in context. + snippet_tags: + - gov2.s3.Scenario_LargeObjects Python: versions: - sdk_version: 3 @@ -2971,7 +2973,7 @@ s3_Scenario_GettingStarted: - description: Define a struct that wraps bucket and object actions used by the scenario. snippet_tags: - gov2.s3.BucketBasics.complete - - description: Run an interactive scenario that shows you how work with S3 buckets and objects. + - description: Run an interactive scenario that shows you how to work with S3 buckets and objects. snippet_tags: - gov2.s3.Scenario_GetStarted Rust: diff --git a/gov2/s3/README.md b/gov2/s3/README.md index b544b08ae56..800493d26ec 100644 --- a/gov2/s3/README.md +++ b/gov2/s3/README.md @@ -45,20 +45,20 @@ Code examples that show you how to perform the essential operations within a ser Code excerpts that show you how to call individual service functions. 
-- [CopyObject](actions/bucket_basics.go#L220) -- [CreateBucket](actions/bucket_basics.go#L81) -- [DeleteBucket](actions/bucket_basics.go#L278) +- [CopyObject](actions/bucket_basics.go#L288) +- [CreateBucket](actions/bucket_basics.go#L94) +- [DeleteBucket](actions/bucket_basics.go#L387) - [DeleteObject](../workflows/s3_object_lock/actions/s3_actions.go#L365) -- [DeleteObjects](../workflows/s3_object_lock/actions/s3_actions.go#L407) -- [GetObject](actions/bucket_basics.go#L149) +- [DeleteObjects](../workflows/s3_object_lock/actions/s3_actions.go#L413) +- [GetObject](actions/bucket_basics.go#L200) - [GetObjectLegalHold](../workflows/s3_object_lock/actions/s3_actions.go#L72) - [GetObjectLockConfiguration](../workflows/s3_object_lock/actions/s3_actions.go#L109) - [GetObjectRetention](../workflows/s3_object_lock/actions/s3_actions.go#L138) -- [HeadBucket](actions/bucket_basics.go#L51) -- [ListBuckets](actions/bucket_basics.go#L35) +- [HeadBucket](actions/bucket_basics.go#L64) +- [ListBuckets](actions/bucket_basics.go#L36) - [ListObjectVersions](../workflows/s3_object_lock/actions/s3_actions.go#L338) -- [ListObjectsV2](actions/bucket_basics.go#L238) -- [PutObject](actions/bucket_basics.go#L100) +- [ListObjectsV2](actions/bucket_basics.go#L316) +- [PutObject](actions/bucket_basics.go#L126) - [PutObjectLegalHold](../workflows/s3_object_lock/actions/s3_actions.go#L173) - [PutObjectLockConfiguration](../workflows/s3_object_lock/actions/s3_actions.go#L234) - [PutObjectRetention](../workflows/s3_object_lock/actions/s3_actions.go#L276) diff --git a/gov2/s3/actions/bucket_basics.go b/gov2/s3/actions/bucket_basics.go index c65f7d08316..f704ea7c33c 100644 --- a/gov2/s3/actions/bucket_basics.go +++ b/gov2/s3/actions/bucket_basics.go @@ -3,6 +3,9 @@ package actions +// snippet-start:[gov2.s3.BucketBasics.complete] +// snippet-start:[gov2.s3.BucketBasics.struct] + import ( "bytes" "context" @@ -11,6 +14,7 @@ import ( "io" "log" "os" + "time" "github.com/aws/aws-sdk-go-v2/aws" 
"github.com/aws/aws-sdk-go-v2/feature/s3/manager" @@ -19,9 +23,6 @@ import ( "github.com/aws/smithy-go" ) -// snippet-start:[gov2.s3.BucketBasics.complete] -// snippet-start:[gov2.s3.BucketBasics.struct] - // BucketBasics encapsulates the Amazon Simple Storage Service (Amazon S3) actions // used in the examples. // It contains S3Client, an Amazon S3 service client that is used to perform bucket @@ -36,12 +37,24 @@ type BucketBasics struct { // ListBuckets lists the buckets in the current account. func (basics BucketBasics) ListBuckets(ctx context.Context) ([]types.Bucket, error) { - result, err := basics.S3Client.ListBuckets(ctx, &s3.ListBucketsInput{}) + var err error + var output *s3.ListBucketsOutput var buckets []types.Bucket - if err != nil { - log.Printf("Couldn't list buckets for your account. Here's why: %v\n", err) - } else { - buckets = result.Buckets + bucketPaginator := s3.NewListBucketsPaginator(basics.S3Client, &s3.ListBucketsInput{}) + for bucketPaginator.HasMorePages() { + output, err = bucketPaginator.NextPage(ctx) + if err != nil { + var apiErr smithy.APIError + if errors.As(err, &apiErr) && apiErr.ErrorCode() == "AccessDenied" { + fmt.Println("You don't have permission to list buckets for this account.") + err = apiErr + } else { + log.Printf("Couldn't list buckets for your account. Here's why: %v\n", err) + } + break + } else { + buckets = append(buckets, output.Buckets...) + } } return buckets, err } @@ -89,8 +102,21 @@ func (basics BucketBasics) CreateBucket(ctx context.Context, name string, region }, }) if err != nil { - log.Printf("Couldn't create bucket %v in Region %v. 
Here's why: %v\n", - name, region, err) + var owned *types.BucketAlreadyOwnedByYou + var exists *types.BucketAlreadyExists + if errors.As(err, &owned) { + log.Printf("You already own bucket %s.\n", name) + err = owned + } else if errors.As(err, &exists) { + log.Printf("Bucket %s already exists.\n", name) + err = exists + } + } else { + err = s3.NewBucketExistsWaiter(basics.S3Client).Wait( + ctx, &s3.HeadBucketInput{Bucket: aws.String(name)}, time.Minute) + if err != nil { + log.Printf("Failed attempt to wait for bucket %s to exist.\n", name) + } } return err } @@ -112,8 +138,21 @@ func (basics BucketBasics) UploadFile(ctx context.Context, bucketName string, ob Body: file, }) if err != nil { - log.Printf("Couldn't upload file %v to %v:%v. Here's why: %v\n", - fileName, bucketName, objectKey, err) + var apiErr smithy.APIError + if errors.As(err, &apiErr) && apiErr.ErrorCode() == "EntityTooLarge" { + log.Printf("Error while uploading object to %s. The object is too large.\n"+ + "To upload objects larger than 5GB, use the S3 console (160GB max)\n"+ + "or the multipart upload API (5TB max).", bucketName) + } else { + log.Printf("Couldn't upload file %v to %v:%v. Here's why: %v\n", + fileName, bucketName, objectKey, err) + } + } else { + err = s3.NewObjectExistsWaiter(basics.S3Client).Wait( + ctx, &s3.HeadObjectInput{Bucket: aws.String(bucketName), Key: aws.String(objectKey)}, time.Minute) + if err != nil { + log.Printf("Failed attempt to wait for object %s to exist.\n", objectKey) + } } } return err @@ -137,8 +176,20 @@ func (basics BucketBasics) UploadLargeObject(ctx context.Context, bucketName str Body: largeBuffer, }) if err != nil { - log.Printf("Couldn't upload large object to %v:%v. Here's why: %v\n", - bucketName, objectKey, err) + var apiErr smithy.APIError + if errors.As(err, &apiErr) && apiErr.ErrorCode() == "EntityTooLarge" { + log.Printf("Error while uploading object to %s. 
The object is too large.\n"+ + "The maximum size for a multipart upload is 5TB.", bucketName) + } else { + log.Printf("Couldn't upload large object to %v:%v. Here's why: %v\n", + bucketName, objectKey, err) + } + } else { + err = s3.NewObjectExistsWaiter(basics.S3Client).Wait( + ctx, &s3.HeadObjectInput{Bucket: aws.String(bucketName), Key: aws.String(objectKey)}, time.Minute) + if err != nil { + log.Printf("Failed attempt to wait for object %s to exist.\n", objectKey) + } } return err @@ -155,7 +206,13 @@ func (basics BucketBasics) DownloadFile(ctx context.Context, bucketName string, Key: aws.String(objectKey), }) if err != nil { - log.Printf("Couldn't get object %v:%v. Here's why: %v\n", bucketName, objectKey, err) + var noKey *types.NoSuchKey + if errors.As(err, &noKey) { + log.Printf("Can't get object %s from bucket %s. No such key exists.\n", objectKey, bucketName) + err = noKey + } else { + log.Printf("Couldn't get object %v:%v. Here's why: %v\n", bucketName, objectKey, err) + } return err } defer result.Body.Close() @@ -203,14 +260,25 @@ func (basics BucketBasics) DownloadLargeObject(ctx context.Context, bucketName s // CopyToFolder copies an object in a bucket to a subfolder in the same bucket. func (basics BucketBasics) CopyToFolder(ctx context.Context, bucketName string, objectKey string, folderName string) error { + objectDest := fmt.Sprintf("%v/%v", folderName, objectKey) _, err := basics.S3Client.CopyObject(ctx, &s3.CopyObjectInput{ Bucket: aws.String(bucketName), CopySource: aws.String(fmt.Sprintf("%v/%v", bucketName, objectKey)), - Key: aws.String(fmt.Sprintf("%v/%v", folderName, objectKey)), + Key: aws.String(objectDest), }) if err != nil { - log.Printf("Couldn't copy object from %v:%v to %v:%v/%v. 
Here's why: %v\n", - bucketName, objectKey, bucketName, folderName, objectKey, err) + var notActive *types.ObjectNotInActiveTierError + if errors.As(err, ¬Active) { + log.Printf("Couldn't copy object %s from %s because the object isn't in the active tier.\n", + objectKey, bucketName) + err = notActive + } + } else { + err = s3.NewObjectExistsWaiter(basics.S3Client).Wait( + ctx, &s3.HeadObjectInput{Bucket: aws.String(bucketName), Key: aws.String(objectDest)}, time.Minute) + if err != nil { + log.Printf("Failed attempt to wait for object %s to exist.\n", objectDest) + } } return err } @@ -227,8 +295,18 @@ func (basics BucketBasics) CopyToBucket(ctx context.Context, sourceBucket string Key: aws.String(objectKey), }) if err != nil { - log.Printf("Couldn't copy object from %v:%v to %v:%v. Here's why: %v\n", - sourceBucket, objectKey, destinationBucket, objectKey, err) + var notActive *types.ObjectNotInActiveTierError + if errors.As(err, ¬Active) { + log.Printf("Couldn't copy object %s from %s because the object isn't in the active tier.\n", + objectKey, sourceBucket) + err = notActive + } + } else { + err = s3.NewObjectExistsWaiter(basics.S3Client).Wait( + ctx, &s3.HeadObjectInput{Bucket: aws.String(destinationBucket), Key: aws.String(objectKey)}, time.Minute) + if err != nil { + log.Printf("Failed attempt to wait for object %s to exist.\n", objectKey) + } } return err } @@ -239,16 +317,27 @@ func (basics BucketBasics) CopyToBucket(ctx context.Context, sourceBucket string // ListObjects lists the objects in a bucket. func (basics BucketBasics) ListObjects(ctx context.Context, bucketName string) ([]types.Object, error) { - result, err := basics.S3Client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ + var err error + var output *s3.ListObjectsV2Output + input := &s3.ListObjectsV2Input{ Bucket: aws.String(bucketName), - }) - var contents []types.Object - if err != nil { - log.Printf("Couldn't list objects in bucket %v. 
Here's why: %v\n", bucketName, err) - } else { - contents = result.Contents } - return contents, err + var objects []types.Object + objectPaginator := s3.NewListObjectsV2Paginator(basics.S3Client, input) + for objectPaginator.HasMorePages() { + output, err = objectPaginator.NextPage(ctx) + if err != nil { + var noBucket *types.NoSuchBucket + if errors.As(err, &noBucket) { + log.Printf("Bucket %s does not exist.\n", bucketName) + err = noBucket + } + break + } else { + objects = append(objects, output.Contents...) + } + } + return objects, err } // snippet-end:[gov2.s3.ListObjectsV2] @@ -263,12 +352,32 @@ func (basics BucketBasics) DeleteObjects(ctx context.Context, bucketName string, } output, err := basics.S3Client.DeleteObjects(ctx, &s3.DeleteObjectsInput{ Bucket: aws.String(bucketName), - Delete: &types.Delete{Objects: objectIds}, + Delete: &types.Delete{Objects: objectIds, Quiet: aws.Bool(true)}, }) - if err != nil { - log.Printf("Couldn't delete objects from bucket %v. Here's why: %v\n", bucketName, err) + if err != nil || len(output.Errors) > 0 { + log.Printf("Error deleting objects from bucket %s.\n", bucketName) + if err != nil { + var noBucket *types.NoSuchBucket + if errors.As(err, &noBucket) { + log.Printf("Bucket %s does not exist.\n", bucketName) + err = noBucket + } + } else if len(output.Errors) > 0 { + for _, outErr := range output.Errors { + log.Printf("%s: %s\n", *outErr.Key, *outErr.Message) + } + err = fmt.Errorf("%s", *output.Errors[0].Message) + } } else { - log.Printf("Deleted %v objects.\n", len(output.Deleted)) + for _, delObjs := range output.Deleted { + err = s3.NewObjectNotExistsWaiter(basics.S3Client).Wait( + ctx, &s3.HeadObjectInput{Bucket: aws.String(bucketName), Key: delObjs.Key}, time.Minute) + if err != nil { + log.Printf("Failed attempt to wait for object %s to be deleted.\n", *delObjs.Key) + } else { + log.Printf("Deleted %s.\n", *delObjs.Key) + } + } } return err } @@ -282,7 +391,21 @@ func (basics BucketBasics) DeleteBucket(ctx 
context.Context, bucketName string) _, err := basics.S3Client.DeleteBucket(ctx, &s3.DeleteBucketInput{ Bucket: aws.String(bucketName)}) if err != nil { - log.Printf("Couldn't delete bucket %v. Here's why: %v\n", bucketName, err) + var noBucket *types.NoSuchBucket + if errors.As(err, &noBucket) { + log.Printf("Bucket %s does not exist.\n", bucketName) + err = noBucket + } else { + log.Printf("Couldn't delete bucket %v. Here's why: %v\n", bucketName, err) + } + } else { + err = s3.NewBucketNotExistsWaiter(basics.S3Client).Wait( + ctx, &s3.HeadBucketInput{Bucket: aws.String(bucketName)}, time.Minute) + if err != nil { + log.Printf("Failed attempt to wait for bucket %s to be deleted.\n", bucketName) + } else { + log.Printf("Deleted %s.\n", bucketName) + } } return err } diff --git a/gov2/s3/actions/bucket_basics_test.go b/gov2/s3/actions/bucket_basics_test.go index 8d96641303f..ac32cd181ad 100644 --- a/gov2/s3/actions/bucket_basics_test.go +++ b/gov2/s3/actions/bucket_basics_test.go @@ -8,17 +8,29 @@ package actions import ( "context" "errors" + "reflect" "testing" "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/awsdocs/aws-doc-sdk-examples/gov2/s3/stubs" "github.com/awsdocs/aws-doc-sdk-examples/gov2/testtools" ) -func enterTest() (*testtools.AwsmStubber, *BucketBasics) { +func enterTest() (context.Context, *testtools.AwsmStubber, *BucketBasics) { stubber := testtools.NewStubber() basics := &BucketBasics{S3Client: s3.NewFromConfig(*stubber.SdkConfig)} - return stubber, basics + return context.Background(), stubber, basics +} + +func wrapErr(expectedErr error) (error, *testtools.StubError) { + return expectedErr, &testtools.StubError{Err: expectedErr} +} + +func verifyErr(expectedErr error, actualErr error, t *testing.T) { + if reflect.TypeOf(expectedErr) != reflect.TypeOf(actualErr) { + t.Errorf("Expected error %T, got %T", expectedErr, actualErr) + } } func TestBucketBasics_CopyToBucket(t *testing.T) { @@ -27,12 
+39,13 @@ func TestBucketBasics_CopyToBucket(t *testing.T) { } func CopyToBucket(raiseErr *testtools.StubError, t *testing.T) { - stubber, basics := enterTest() - stubber.Add(stubs.StubCopyObject("amzn-s3-demo-bucket-source", "object-key", "amzn-s3-demo-bucket-dest", "object-key", raiseErr)) - ctx := context.Background() + ctx, stubber, basics := enterTest() + defer testtools.ExitTest(stubber, t) - err := basics.CopyToBucket(ctx, "amzn-s3-demo-bucket-source", "amzn-s3-demo-bucket-dest", "object-key") + expectedErr, stubErr := wrapErr(&types.ObjectNotInActiveTierError{}) + stubber.Add(stubs.StubCopyObject("amzn-s3-demo-bucket-source", "object-key", "amzn-s3-demo-bucket-dest", "object-key", stubErr)) + stubber.Add(stubs.StubHeadObject("amzn-s3-demo-bucket-source", "object-key", raiseErr)) - testtools.VerifyError(err, raiseErr, t) - testtools.ExitTest(stubber, t) + actualErr := basics.CopyToBucket(ctx, "amzn-s3-demo-bucket-source", "amzn-s3-demo-bucket-dest", "object-key") + verifyErr(expectedErr, actualErr, t) } diff --git a/gov2/s3/actions/presigner.go b/gov2/s3/actions/presigner.go index 24a8d4719f3..ad46cf75d7c 100644 --- a/gov2/s3/actions/presigner.go +++ b/gov2/s3/actions/presigner.go @@ -3,6 +3,9 @@ package actions +// snippet-start:[gov2.s3.Presigner.complete] +// snippet-start:[gov2.Presigner.struct] + import ( "context" "log" @@ -13,9 +16,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3" ) -// snippet-start:[gov2.s3.Presigner.complete] -// snippet-start:[gov2.Presigner.struct] - // Presigner encapsulates the Amazon Simple Storage Service (Amazon S3) presign actions // used in the examples. // It contains PresignClient, a client that is used to presign requests to Amazon S3. @@ -47,7 +47,7 @@ func (presigner Presigner) GetObject( // snippet-end:[gov2.s3.PresignGetObject] -// snippet-start:[gov2.s3.PresignPubObject] +// snippet-start:[gov2.s3.PresignPutObject] // PutObject makes a presigned request that can be used to put an object in a bucket. 
// The presigned request is valid for the specified number of seconds. @@ -66,7 +66,7 @@ func (presigner Presigner) PutObject( return request, err } -// snippet-end:[gov2.s3.PresignPubObject] +// snippet-end:[gov2.s3.PresignPutObject] // snippet-start:[gov2.s3.PresignDeleteObject] diff --git a/gov2/s3/cmd/main.go b/gov2/s3/cmd/main.go index ba7c6114459..df87550daed 100644 --- a/gov2/s3/cmd/main.go +++ b/gov2/s3/cmd/main.go @@ -23,13 +23,16 @@ import ( // - `getstarted` - Runs the interactive get started scenario that shows you how to use // Amazon Simple Storage Service (Amazon S3) actions to work with // S3 buckets and objects. +// - `largeobjects` - Runs the interactive large objects scenario that shows you how to upload +// and download large objects by using a transfer manager. // - `presigning` - Runs the interactive presigning scenario that shows you how to // get presigned requests that contain temporary credentials // and can be used to make requests from any HTTP client. func main() { scenarioMap := map[string]func(ctx context.Context, sdkConfig aws.Config){ - "getstarted": runGetStartedScenario, - "presigning": runPresigningScenario, + "getstarted": runGetStartedScenario, + "largeobjects": runLargeObjectScenario, + "presigning": runPresigningScenario, } choices := make([]string, len(scenarioMap)) choiceIndex := 0 @@ -61,6 +64,10 @@ func runGetStartedScenario(ctx context.Context, sdkConfig aws.Config) { scenarios.RunGetStartedScenario(ctx, sdkConfig, demotools.NewQuestioner()) } +func runLargeObjectScenario(ctx context.Context, sdkConfig aws.Config) { + scenarios.RunLargeObjectScenario(ctx, sdkConfig, demotools.NewQuestioner()) +} + func runPresigningScenario(ctx context.Context, sdkConfig aws.Config) { scenarios.RunPresigningScenario(ctx, sdkConfig, demotools.NewQuestioner(), scenarios.HttpRequester{}) } diff --git a/gov2/s3/hello/hello.go b/gov2/s3/hello/hello.go index 073a9825a83..86d35ab8711 100644 --- a/gov2/s3/hello/hello.go +++ 
b/gov2/s3/hello/hello.go @@ -7,10 +7,12 @@ package main import ( "context" + "errors" "fmt" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/smithy-go" ) // main uses the AWS SDK for Go V2 to create an Amazon Simple Storage Service @@ -30,7 +32,12 @@ func main() { fmt.Printf("Let's list up to %v buckets for your account.\n", count) result, err := s3Client.ListBuckets(ctx, &s3.ListBucketsInput{}) if err != nil { - fmt.Printf("Couldn't list buckets for your account. Here's why: %v\n", err) + var ae smithy.APIError + if errors.As(err, &ae) && ae.ErrorCode() == "AccessDenied" { + fmt.Println("You don't have permission to list buckets for this account.") + } else { + fmt.Printf("Couldn't list buckets for your account. Here's why: %v\n", err) + } return } if len(result.Buckets) == 0 { diff --git a/gov2/s3/scenarios/scenario_get_started.go b/gov2/s3/scenarios/scenario_get_started.go index 3b04d861344..00840cf14b3 100644 --- a/gov2/s3/scenarios/scenario_get_started.go +++ b/gov2/s3/scenarios/scenario_get_started.go @@ -3,9 +3,10 @@ package scenarios +// snippet-start:[gov2.s3.Scenario_GetStarted] + import ( "context" - "crypto/rand" "fmt" "log" "os" @@ -17,20 +18,16 @@ import ( "github.com/awsdocs/aws-doc-sdk-examples/gov2/s3/actions" ) -// snippet-start:[gov2.s3.Scenario_GetStarted] - // RunGetStartedScenario is an interactive example that shows you how to use Amazon // Simple Storage Service (Amazon S3) to create an S3 bucket and use it to store objects. // // 1. Create a bucket. // 2. Upload a local file to the bucket. -// 3. Upload a large object to the bucket by using an upload manager. -// 4. Download an object to a local file. -// 5. Download a large object by using a download manager. -// 6. Copy an object to a different folder in the bucket. -// 7. List objects in the bucket. -// 8. Delete all objects in the bucket. -// 9. Delete the bucket. +// 3. Download an object to a local file. +// 4. 
Copy an object to a different folder in the bucket. +// 5. List objects in the bucket. +// 6. Delete all objects in the bucket. +// 7. Delete the bucket. // // This example creates an Amazon S3 service client from the specified sdkConfig so that // you can replace it with a mocked or stubbed config for unit testing. @@ -40,7 +37,11 @@ import ( func RunGetStartedScenario(ctx context.Context, sdkConfig aws.Config, questioner demotools.IQuestioner) { defer func() { if r := recover(); r != nil { - fmt.Println("Something went wrong with the demo.\n", r) + log.Println("Something went wrong with the demo.") + _, isMock := questioner.(*demotools.MockQuestioner) + if isMock || questioner.AskBool("Do you want to see the full error message (y/n)?", "y") { + log.Println(r) + } } }() @@ -95,20 +96,6 @@ func RunGetStartedScenario(ctx context.Context, sdkConfig aws.Config, questioner log.Printf("Uploaded %v as %v.\n", smallFile, smallKey) log.Println(strings.Repeat("-", 88)) - mibs := 30 - log.Printf("Let's create a slice of %v MiB of random bytes and upload it to your bucket. 
", mibs) - questioner.Ask("Press Enter when you're ready.") - largeBytes := make([]byte, 1024*1024*mibs) - _, _ = rand.Read(largeBytes) - largeKey := "doc-example-large" - log.Println("Uploading...") - err = bucketBasics.UploadLargeObject(ctx, bucketName, largeKey, largeBytes) - if err != nil { - panic(err) - } - log.Printf("Uploaded %v MiB object as %v", mibs, largeKey) - log.Println(strings.Repeat("-", 88)) - log.Printf("Let's download %v to a file.", smallKey) downloadFileName := questioner.Ask("Enter a name for the downloaded file:", demotools.NotEmpty{}) err = bucketBasics.DownloadFile(ctx, bucketName, smallKey, downloadFileName) @@ -118,16 +105,6 @@ func RunGetStartedScenario(ctx context.Context, sdkConfig aws.Config, questioner log.Printf("File %v downloaded.", downloadFileName) log.Println(strings.Repeat("-", 88)) - log.Printf("Let's download the %v MiB object.", mibs) - questioner.Ask("Press Enter when you're ready.") - log.Println("Downloading...") - largeDownload, err := bucketBasics.DownloadLargeObject(ctx, bucketName, largeKey) - if err != nil { - panic(err) - } - log.Printf("Downloaded %v bytes.", len(largeDownload)) - log.Println(strings.Repeat("-", 88)) - log.Printf("Let's copy %v to a folder in the same bucket.", smallKey) folderName := questioner.Ask("Enter a folder name: ", demotools.NotEmpty{}) err = bucketBasics.CopyToFolder(ctx, bucketName, smallKey, folderName) diff --git a/gov2/s3/scenarios/scenario_get_started_integ_test.go b/gov2/s3/scenarios/scenario_get_started_integ_test.go index 2a936580c9f..86ecb0e7370 100644 --- a/gov2/s3/scenarios/scenario_get_started_integ_test.go +++ b/gov2/s3/scenarios/scenario_get_started_integ_test.go @@ -31,7 +31,7 @@ func TestGetStartedScenario_Integration(t *testing.T) { outFile := "integ-test.out" mockQuestioner := &demotools.MockQuestioner{ Answers: []string{ - bucket, "../README.md", "", outFile, "", "test-folder", "", "y", + bucket, "../README.md", outFile, "test-folder", "", "y", }, } diff --git 
a/gov2/s3/scenarios/scenario_get_started_test.go b/gov2/s3/scenarios/scenario_get_started_test.go index 26f42ee4dad..a22e1a6d4ce 100644 --- a/gov2/s3/scenarios/scenario_get_started_test.go +++ b/gov2/s3/scenarios/scenario_get_started_test.go @@ -9,7 +9,6 @@ import ( "context" "fmt" "io" - "net/http" "os" "strings" "testing" @@ -30,17 +29,6 @@ func TestRunGetStartedScenario(t *testing.T) { testtools.RunScenarioTests(&scenTest, t) } -// httpErr is used to mock an HTTP error. This is required by the download manager, -// which calls GetObject until it receives a 415 status code. -type httpErr struct { - statusCode int -} - -func (responseErr httpErr) HTTPStatusCode() int { return responseErr.statusCode } -func (responseErr httpErr) Error() string { - return fmt.Sprintf("HTTP error: %v", responseErr.statusCode) -} - // GetStartedScenarioTest encapsulates data for a scenario test. type GetStartedScenarioTest struct { Answers []string @@ -52,20 +40,17 @@ type GetStartedScenarioTest struct { func (scenTest *GetStartedScenarioTest) SetupDataAndStubs() []testtools.Stub { bucketName := "amzn-s3-demo-bucket-1" objectKey := "doc-example-key" - largeKey := "doc-example-large" bucketList := []types.Bucket{{Name: aws.String(bucketName)}, {Name: aws.String("amzn-s3-demo-bucket-2")}} testConfig, err := config.LoadDefaultConfig(context.TODO()) if err != nil { panic(err) } - uploadId := "upload-id" testBody := io.NopCloser(strings.NewReader("Test data!")) - dnRanges := []int{0, 10 * 1024 * 1024, 20 * 1024 * 1024, 30 * 1024 * 1024, 40 * 1024 * 1024} scenTest.OutFilename = "test.out" copyFolder := "copy_folder" listKeys := []string{"object-1", "object-2", "object-3"} scenTest.Answers = []string{ - bucketName, "../README.md", "", scenTest.OutFilename, "", copyFolder, "", "y", + bucketName, "../README.md", scenTest.OutFilename, copyFolder, "", "y", } var stubList []testtools.Stub @@ -73,27 +58,21 @@ func (scenTest *GetStartedScenarioTest) SetupDataAndStubs() []testtools.Stub { stubList = 
append(stubList, stubs.StubHeadBucket( bucketName, &testtools.StubError{Err: &types.NotFound{}, ContinueAfter: true})) stubList = append(stubList, stubs.StubCreateBucket(bucketName, testConfig.Region, nil)) + stubList = append(stubList, stubs.StubHeadBucket(bucketName, nil)) stubList = append(stubList, stubs.StubPutObject(bucketName, objectKey, nil)) - stubList = append(stubList, stubs.StubCreateMultipartUpload(bucketName, largeKey, uploadId, nil)) - stubList = append(stubList, stubs.StubUploadPart(bucketName, largeKey, uploadId, nil)) - stubList = append(stubList, stubs.StubUploadPart(bucketName, largeKey, uploadId, nil)) - stubList = append(stubList, stubs.StubUploadPart(bucketName, largeKey, uploadId, nil)) - stubList = append(stubList, stubs.StubCompleteMultipartUpload(bucketName, largeKey, uploadId, []int32{1, 2, 3}, nil)) + stubList = append(stubList, stubs.StubHeadObject(bucketName, objectKey, nil)) stubList = append(stubList, stubs.StubGetObject(bucketName, objectKey, nil, testBody, nil)) - for i := 0; i < len(dnRanges)-2; i++ { - stubList = append(stubList, stubs.StubGetObject(bucketName, largeKey, - aws.String(fmt.Sprintf("bytes=%v-%v", dnRanges[i], dnRanges[i+1]-1)), testBody, nil)) - } - // The S3 downloader calls GetObject until it receives a 416 HTTP status code. 
- respErr := httpErr{statusCode: http.StatusRequestedRangeNotSatisfiable} - stubList = append(stubList, stubs.StubGetObject(bucketName, largeKey, - aws.String(fmt.Sprintf("bytes=%v-%v", dnRanges[3], dnRanges[4]-1)), testBody, - &testtools.StubError{Err: respErr, ContinueAfter: true})) stubList = append(stubList, stubs.StubCopyObject( bucketName, objectKey, bucketName, fmt.Sprintf("%v/%v", copyFolder, objectKey), nil)) + stubList = append(stubList, stubs.StubHeadObject(bucketName, fmt.Sprintf("%v/%v", copyFolder, objectKey), nil)) stubList = append(stubList, stubs.StubListObjectsV2(bucketName, listKeys, nil)) stubList = append(stubList, stubs.StubDeleteObjects(bucketName, listKeys, nil)) + for _, key := range listKeys { + stubList = append(stubList, stubs.StubHeadObject(bucketName, key, + &testtools.StubError{Err: &types.NotFound{}, ContinueAfter: true})) + } stubList = append(stubList, stubs.StubDeleteBucket(bucketName, nil)) + stubList = append(stubList, stubs.StubHeadBucket(bucketName, &testtools.StubError{Err: &types.NotFound{}, ContinueAfter: true})) return stubList } diff --git a/gov2/s3/scenarios/scenario_large_objects.go b/gov2/s3/scenarios/scenario_large_objects.go new file mode 100644 index 00000000000..c163e6617af --- /dev/null +++ b/gov2/s3/scenarios/scenario_large_objects.go @@ -0,0 +1,113 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package scenarios + +// snippet-start:[gov2.s3.Scenario_LargeObjects] + +import ( + "context" + "crypto/rand" + "log" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/awsdocs/aws-doc-sdk-examples/gov2/demotools" + "github.com/awsdocs/aws-doc-sdk-examples/gov2/s3/actions" +) + +// RunLargeObjectScenario is an interactive example that shows you how to use Amazon +// Simple Storage Service (Amazon S3) to upload and download large objects. +// +// 1. Create a bucket. +// 2. 
Upload a large object to the bucket by using an upload manager. +// 3. Download a large object by using a download manager. +// 4. Delete all objects in the bucket. +// 5. Delete the bucket. +// +// This example creates an Amazon S3 service client from the specified sdkConfig so that +// you can replace it with a mocked or stubbed config for unit testing. +// +// It uses a questioner from the `demotools` package to get input during the example. +// This package can be found in the ..\..\demotools folder of this repo. +func RunLargeObjectScenario(ctx context.Context, sdkConfig aws.Config, questioner demotools.IQuestioner) { + defer func() { + if r := recover(); r != nil { + log.Println("Something went wrong with the demo.") + _, isMock := questioner.(*demotools.MockQuestioner) + if isMock || questioner.AskBool("Do you want to see the full error message (y/n)?", "y") { + log.Println(r) + } + } + }() + + log.Println(strings.Repeat("-", 88)) + log.Println("Welcome to the Amazon S3 large object demo.") + log.Println(strings.Repeat("-", 88)) + + s3Client := s3.NewFromConfig(sdkConfig) + bucketBasics := actions.BucketBasics{S3Client: s3Client} + + bucketName := questioner.Ask("Let's create a bucket. Enter a name for your bucket:", + demotools.NotEmpty{}) + bucketExists, err := bucketBasics.BucketExists(ctx, bucketName) + if err != nil { + panic(err) + } + if !bucketExists { + err = bucketBasics.CreateBucket(ctx, bucketName, sdkConfig.Region) + if err != nil { + panic(err) + } else { + log.Println("Bucket created.") + } + } + log.Println(strings.Repeat("-", 88)) + + mibs := 30 + log.Printf("Let's create a slice of %v MiB of random bytes and upload it to your bucket. 
", mibs) + questioner.Ask("Press Enter when you're ready.") + largeBytes := make([]byte, 1024*1024*mibs) + _, _ = rand.Read(largeBytes) + largeKey := "doc-example-large" + log.Println("Uploading...") + err = bucketBasics.UploadLargeObject(ctx, bucketName, largeKey, largeBytes) + if err != nil { + panic(err) + } + log.Printf("Uploaded %v MiB object as %v", mibs, largeKey) + log.Println(strings.Repeat("-", 88)) + + log.Printf("Let's download the %v MiB object.", mibs) + questioner.Ask("Press Enter when you're ready.") + log.Println("Downloading...") + largeDownload, err := bucketBasics.DownloadLargeObject(ctx, bucketName, largeKey) + if err != nil { + panic(err) + } + log.Printf("Downloaded %v bytes.", len(largeDownload)) + log.Println(strings.Repeat("-", 88)) + + if questioner.AskBool("Do you want to delete your bucket and all of its "+ + "contents? (y/n)", "y") { + log.Println("Deleting object.") + err = bucketBasics.DeleteObjects(ctx, bucketName, []string{largeKey}) + if err != nil { + panic(err) + } + log.Println("Deleting bucket.") + err = bucketBasics.DeleteBucket(ctx, bucketName) + if err != nil { + panic(err) + } + } else { + log.Println("Okay. Don't forget to delete objects from your bucket to avoid charges.") + } + log.Println(strings.Repeat("-", 88)) + + log.Println("Thanks for watching!") + log.Println(strings.Repeat("-", 88)) +} + +// snippet-end:[gov2.s3.Scenario_LargeObjects] diff --git a/gov2/s3/scenarios/scenario_large_objects_integ_test.go b/gov2/s3/scenarios/scenario_large_objects_integ_test.go new file mode 100644 index 00000000000..5d42ec4c68e --- /dev/null +++ b/gov2/s3/scenarios/scenario_large_objects_integ_test.go @@ -0,0 +1,56 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +//go:build integration +// +build integration + +// Integration test for the Amazon S3 large objects scenario. 
+
+package scenarios
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"log"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/awsdocs/aws-doc-sdk-examples/gov2/demotools"
+	"github.com/google/uuid"
+)
+
+func TestLargeObjectScenario_Integration(t *testing.T) {
+	bucket := os.Getenv("S3_BUCKET_NAME_PREFIX")
+	if bucket == "" {
+		bucket = "amzn-s3-demo-bucket"
+	} else {
+		bucket = fmt.Sprintf("%s-%s", bucket, uuid.New())
+	}
+	outFile := "integ-test.out"
+	mockQuestioner := &demotools.MockQuestioner{
+		Answers: []string{
+			bucket, "", "", "y",
+		},
+	}
+
+	ctx := context.Background()
+	sdkConfig, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		log.Fatalf("unable to load SDK config, %v", err)
+	}
+
+	log.SetFlags(0)
+	var buf bytes.Buffer
+	log.SetOutput(&buf)
+
+	RunLargeObjectScenario(ctx, sdkConfig, mockQuestioner)
+
+	_ = os.Remove(outFile)
+
+	log.SetOutput(os.Stderr)
+	if !strings.Contains(buf.String(), "Thanks for watching") {
+		t.Errorf("didn't run to successful completion. Here's the log:\n%v", buf.String())
+	}
+}
diff --git a/gov2/s3/scenarios/scenario_large_objects_test.go b/gov2/s3/scenarios/scenario_large_objects_test.go
new file mode 100644
index 00000000000..1dd9e8efbcd
--- /dev/null
+++ b/gov2/s3/scenarios/scenario_large_objects_test.go
@@ -0,0 +1,108 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+// Unit tests for the large objects scenario.
+
+package scenarios
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/awsdocs/aws-doc-sdk-examples/gov2/demotools"
+	"github.com/awsdocs/aws-doc-sdk-examples/gov2/s3/stubs"
+	"github.com/awsdocs/aws-doc-sdk-examples/gov2/testtools"
+)
+
+// TestRunLargeObjectScenario runs the scenario multiple times. 
The first time, it runs with no
+// errors. In subsequent runs, it specifies that each stub in the sequence should
+// raise an error and verifies the results.
+func TestRunLargeObjectScenario(t *testing.T) {
+	scenTest := LargeObjectScenarioTest{}
+	testtools.RunScenarioTests(&scenTest, t)
+}
+
+// httpErr is used to mock an HTTP error. This is required by the download manager,
+// which calls GetObject until it receives a 416 status code.
+type httpErr struct {
+	statusCode int
+}
+
+func (responseErr httpErr) HTTPStatusCode() int { return responseErr.statusCode }
+func (responseErr httpErr) Error() string {
+	return fmt.Sprintf("HTTP error: %v", responseErr.statusCode)
+}
+
+// LargeObjectScenarioTest encapsulates data for a scenario test.
+type LargeObjectScenarioTest struct {
+	Answers     []string
+	OutFilename string
+}
+
+// SetupDataAndStubs sets up test data and builds the stubs that are used to return
+// mocked data.
+func (scenTest *LargeObjectScenarioTest) SetupDataAndStubs() []testtools.Stub {
+	bucketName := "amzn-s3-demo-bucket-1"
+	largeKey := "doc-example-large"
+	testConfig, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		panic(err)
+	}
+	uploadId := "upload-id"
+	testBody := io.NopCloser(strings.NewReader("Test data!"))
+	dnRanges := []int{0, 10 * 1024 * 1024, 20 * 1024 * 1024, 30 * 1024 * 1024, 40 * 1024 * 1024}
+	listKeys := []string{largeKey}
+	scenTest.Answers = []string{
+		bucketName, "", "", "y",
+	}
+
+	var stubList []testtools.Stub
+	stubList = append(stubList, stubs.StubHeadBucket(
+		bucketName, &testtools.StubError{Err: &types.NotFound{}, ContinueAfter: true}))
+	stubList = append(stubList, stubs.StubCreateBucket(bucketName, testConfig.Region, nil))
+	stubList = append(stubList, stubs.StubHeadBucket(bucketName, nil))
+	stubList = append(stubList, stubs.StubCreateMultipartUpload(bucketName, largeKey, uploadId, nil))
+	stubList = append(stubList, stubs.StubUploadPart(bucketName, largeKey, uploadId, nil))
+	stubList = 
append(stubList, stubs.StubUploadPart(bucketName, largeKey, uploadId, nil)) + stubList = append(stubList, stubs.StubUploadPart(bucketName, largeKey, uploadId, nil)) + stubList = append(stubList, stubs.StubCompleteMultipartUpload(bucketName, largeKey, uploadId, []int32{1, 2, 3}, nil)) + stubList = append(stubList, stubs.StubHeadObject(bucketName, largeKey, nil)) + for i := 0; i < len(dnRanges)-2; i++ { + stubList = append(stubList, stubs.StubGetObject(bucketName, largeKey, + aws.String(fmt.Sprintf("bytes=%v-%v", dnRanges[i], dnRanges[i+1]-1)), testBody, nil)) + } + // The S3 downloader calls GetObject until it receives a 416 HTTP status code. + respErr := httpErr{statusCode: http.StatusRequestedRangeNotSatisfiable} + stubList = append(stubList, stubs.StubGetObject(bucketName, largeKey, + aws.String(fmt.Sprintf("bytes=%v-%v", dnRanges[3], dnRanges[4]-1)), testBody, + &testtools.StubError{Err: respErr, ContinueAfter: true})) + stubList = append(stubList, stubs.StubDeleteObjects(bucketName, listKeys, nil)) + for _, key := range listKeys { + stubList = append(stubList, stubs.StubHeadObject(bucketName, key, + &testtools.StubError{Err: &types.NotFound{}, ContinueAfter: true})) + } + stubList = append(stubList, stubs.StubDeleteBucket(bucketName, nil)) + stubList = append(stubList, stubs.StubHeadBucket(bucketName, &testtools.StubError{Err: &types.NotFound{}, ContinueAfter: true})) + + return stubList +} + +// RunSubTest performs a single test run with a set of stubs set up to run with +// or without errors. +func (scenTest *LargeObjectScenarioTest) RunSubTest(stubber *testtools.AwsmStubber) { + mockQuestioner := demotools.MockQuestioner{Answers: scenTest.Answers} + RunLargeObjectScenario(context.Background(), *stubber.SdkConfig, &mockQuestioner) +} + +// Cleanup deletes the output file created by the download test. 
+func (scenTest *LargeObjectScenarioTest) Cleanup() { + _ = os.Remove(scenTest.OutFilename) +} diff --git a/gov2/s3/scenarios/scenario_presigning.go b/gov2/s3/scenarios/scenario_presigning.go index 976386658a5..e63e903ce9d 100644 --- a/gov2/s3/scenarios/scenario_presigning.go +++ b/gov2/s3/scenarios/scenario_presigning.go @@ -3,10 +3,11 @@ package scenarios +// snippet-start:[gov2.s3.IHttpRequester.helper] + import ( "bytes" "context" - "fmt" "io" "log" "mime/multipart" @@ -20,8 +21,6 @@ import ( "github.com/awsdocs/aws-doc-sdk-examples/gov2/s3/actions" ) -// snippet-start:[gov2.s3.IHttpRequester.helper] - // IHttpRequester abstracts HTTP requests into an interface so it can be mocked during // unit testing. type IHttpRequester interface { @@ -126,7 +125,11 @@ func sendMultipartRequest(url string, fields map[string]string, file *os.File, f func RunPresigningScenario(ctx context.Context, sdkConfig aws.Config, questioner demotools.IQuestioner, httpRequester IHttpRequester) { defer func() { if r := recover(); r != nil { - fmt.Printf("Something went wrong with the demo") + log.Println("Something went wrong with the demo.") + _, isMock := questioner.(*demotools.MockQuestioner) + if isMock || questioner.AskBool("Do you want to see the full error message (y/n)?", "y") { + log.Println(r) + } } }() diff --git a/gov2/s3/scenarios/scenario_presigning_test.go b/gov2/s3/scenarios/scenario_presigning_test.go index 9d7b168bb8c..d70813e97e2 100644 --- a/gov2/s3/scenarios/scenario_presigning_test.go +++ b/gov2/s3/scenarios/scenario_presigning_test.go @@ -69,6 +69,7 @@ func (scenTest *PresigningScenarioTest) SetupDataAndStubs() []testtools.Stub { stubList = append(stubList, stubs.StubHeadBucket( bucketName, &testtools.StubError{Err: &types.NotFound{}, ContinueAfter: true})) stubList = append(stubList, stubs.StubCreateBucket(bucketName, testConfig.Region, nil)) + stubList = append(stubList, stubs.StubHeadBucket(bucketName, nil)) stubList = append(stubList, 
stubs.StubPresignedRequest("PUT", bucketName, objectKey, nil)) stubList = append(stubList, stubs.StubPresignedRequest("GET", bucketName, objectKey, nil)) stubList = append(stubList, stubs.StubPresignedRequest("POST", bucketName, objectKey, nil)) diff --git a/gov2/s3/stubs/bucket_basics_stubs.go b/gov2/s3/stubs/bucket_basics_stubs.go index d1b3620e3f9..76777274142 100644 --- a/gov2/s3/stubs/bucket_basics_stubs.go +++ b/gov2/s3/stubs/bucket_basics_stubs.go @@ -35,8 +35,9 @@ func StubHeadBucket(bucketName string, raiseErr *testtools.StubError) testtools. Input: &s3.HeadBucketInput{ Bucket: aws.String(bucketName), }, - Output: &s3.HeadBucketOutput{}, - Error: raiseErr, + Output: &s3.HeadBucketOutput{}, + Error: raiseErr, + SkipErrorTest: true, } } @@ -67,6 +68,16 @@ func StubPutObject(bucketName string, objectKey string, raiseErr *testtools.Stub } } +func StubHeadObject(bucketName string, objectKey string, raiseErr *testtools.StubError) testtools.Stub { + return testtools.Stub{ + OperationName: "HeadObject", + Input: &s3.HeadObjectInput{Bucket: aws.String(bucketName), Key: aws.String(objectKey)}, + Output: &s3.HeadObjectOutput{}, + SkipErrorTest: true, + Error: raiseErr, + } +} + func StubCreateMultipartUpload(bucketName string, objectKey string, uploadId string, raiseErr *testtools.StubError) testtools.Stub { return testtools.Stub{ @@ -170,16 +181,18 @@ func StubListObjectsV2(bucketName string, keys []string, raiseErr *testtools.Stu func StubDeleteObjects(bucketName string, keys []string, raiseErr *testtools.StubError) testtools.Stub { var objectIds []types.ObjectIdentifier + var delObjs []types.DeletedObject for _, key := range keys { objectIds = append(objectIds, types.ObjectIdentifier{Key: aws.String(key)}) + delObjs = append(delObjs, types.DeletedObject{Key: aws.String(key)}) } return testtools.Stub{ OperationName: "DeleteObjects", Input: &s3.DeleteObjectsInput{ Bucket: aws.String(bucketName), - Delete: &types.Delete{Objects: objectIds}, + Delete: 
&types.Delete{Objects: objectIds, Quiet: aws.Bool(true)}, }, - Output: &s3.DeleteObjectsOutput{}, + Output: &s3.DeleteObjectsOutput{Deleted: delObjs}, Error: raiseErr, } } diff --git a/gov2/workflows/s3_object_lock/actions/s3_actions.go b/gov2/workflows/s3_object_lock/actions/s3_actions.go index 3bb9e004920..8426bd3999b 100644 --- a/gov2/workflows/s3_object_lock/actions/s3_actions.go +++ b/gov2/workflows/s3_object_lock/actions/s3_actions.go @@ -3,6 +3,9 @@ package actions +// snippet-start:[gov2.workflows.s3.ObjectLock.S3Actions.complete] +// snippet-start:[gov2.workflows.s3.ObjectLock.S3Actions.struct] + import ( "bytes" "context" @@ -18,9 +21,6 @@ import ( "github.com/aws/smithy-go" ) -// snippet-start:[gov2.workflows.s3.ObjectLock.S3Actions.complete] -// snippet-start:[gov2.workflows.s3.ObjectLock.S3Actions.struct] - // S3Actions wraps S3 service actions. type S3Actions struct { S3Client *s3.Client @@ -397,7 +397,13 @@ func (actor S3Actions) DeleteObject(ctx context.Context, bucket string, key stri } } } else { - deleted = true + err = s3.NewObjectNotExistsWaiter(actor.S3Client).Wait( + ctx, &s3.HeadObjectInput{Bucket: aws.String(bucket), Key: aws.String(key)}, time.Minute) + if err != nil { + log.Printf("Failed attempt to wait for object %s in bucket %s to be deleted.\n", key, bucket) + } else { + deleted = true + } } return deleted, err } @@ -437,6 +443,16 @@ func (actor S3Actions) DeleteObjects(ctx context.Context, bucket string, objects } err = fmt.Errorf("%s", *delOut.Errors[0].Message) } + } else { + for _, delObjs := range delOut.Deleted { + err = s3.NewObjectNotExistsWaiter(actor.S3Client).Wait( + ctx, &s3.HeadObjectInput{Bucket: aws.String(bucket), Key: delObjs.Key}, time.Minute) + if err != nil { + log.Printf("Failed attempt to wait for object %s to be deleted.\n", *delObjs.Key) + } else { + log.Printf("Deleted %s.\n", *delObjs.Key) + } + } } return err } diff --git a/gov2/workflows/s3_object_lock/actions/s3_actions_test.go 
b/gov2/workflows/s3_object_lock/actions/s3_actions_test.go index 7ac078f782a..364f8714d6e 100644 --- a/gov2/workflows/s3_object_lock/actions/s3_actions_test.go +++ b/gov2/workflows/s3_object_lock/actions/s3_actions_test.go @@ -165,6 +165,7 @@ func TestDeleteObject(t *testing.T) { ctx, stubber, actor := enterTest() _, stubErr := wrapErr(raisedErr) stubber.Add(stubs.StubDeleteObject("amzn-s3-demo-bucket", "test-key", "test-version", true, stubErr)) + stubber.Add(stubs.StubHeadObject("amzn-s3-demo-bucket", "test-key", &testtools.StubError{Err: &types.NotFound{}, ContinueAfter: true})) _, actualErr := actor.DeleteObject(ctx, "amzn-s3-demo-bucket", "test-key", "test-version", true) expectedErr := raisedErr if _, ok := raisedErr.(*smithy.GenericAPIError); ok { @@ -181,6 +182,7 @@ func TestDeleteObjects(t *testing.T) { expectedErr, stubErr := wrapErr(&types.NoSuchBucket{}) stubber.Add(stubs.StubDeleteObjects("amzn-s3-demo-bucket", []types.ObjectVersion{{Key: aws.String("test-key"), VersionId: aws.String("test-version")}}, true, stubErr)) + stubber.Add(stubs.StubHeadObject("amzn-s3-demo-bucket", "test-key", &testtools.StubError{Err: &types.NotFound{}, ContinueAfter: true})) actualErr := actor.DeleteObjects(ctx, "amzn-s3-demo-bucket", []types.ObjectIdentifier{{Key: aws.String("test-key"), VersionId: aws.String("test-version")}}, true) verifyErr(expectedErr, actualErr, t) } diff --git a/gov2/workflows/s3_object_lock/stubs/s3_stubs.go b/gov2/workflows/s3_object_lock/stubs/s3_stubs.go index 7f5160adae2..999f63576d5 100644 --- a/gov2/workflows/s3_object_lock/stubs/s3_stubs.go +++ b/gov2/workflows/s3_object_lock/stubs/s3_stubs.go @@ -196,11 +196,13 @@ func StubDeleteObject(bucket string, key string, versionId string, bypassGoverna func StubDeleteObjects(bucket string, objVersions []types.ObjectVersion, bypassGov bool, raiseErr *testtools.StubError) testtools.Stub { delObjs := make([]types.ObjectIdentifier, len(objVersions)) + delOuts := make([]types.DeletedObject, 
len(objVersions)) for i := 0; i < len(objVersions); i++ { delObjs[i] = types.ObjectIdentifier{ Key: objVersions[i].Key, VersionId: objVersions[i].VersionId, } + delOuts[i] = types.DeletedObject{Key: objVersions[i].Key} } input := &s3.DeleteObjectsInput{ Bucket: aws.String(bucket), @@ -215,10 +217,11 @@ func StubDeleteObjects(bucket string, objVersions []types.ObjectVersion, bypassG return testtools.Stub{ OperationName: "DeleteObjects", Input: input, - Output: &s3.DeleteObjectsOutput{}, + Output: &s3.DeleteObjectsOutput{Deleted: delOuts}, Error: raiseErr, } } + func StubDeleteBucket(bucket string, raiseErr *testtools.StubError) testtools.Stub { return testtools.Stub{ OperationName: "DeleteBucket", diff --git a/gov2/workflows/s3_object_lock/workflows/resources.go b/gov2/workflows/s3_object_lock/workflows/resources.go index f25cff96d4f..24891632e12 100644 --- a/gov2/workflows/s3_object_lock/workflows/resources.go +++ b/gov2/workflows/s3_object_lock/workflows/resources.go @@ -3,10 +3,13 @@ package workflows +// snippet-start:[gov2.workflows.s3.ObjectLock.Resources.complete] + import ( "context" "log" "s3_object_lock/actions" + "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" @@ -14,8 +17,6 @@ import ( "github.com/awsdocs/aws-doc-sdk-examples/gov2/demotools" ) -// snippet-start:[gov2.workflows.s3.ObjectLock.Resources.complete] - // DemoBucket contains metadata for buckets used in this example. 
type DemoBucket struct { name string @@ -75,6 +76,16 @@ func (resources *Resources) deleteBuckets(ctx context.Context) { panic(err) } } + for _, info := range createInfo { + bucket := resources.demoBuckets[info.name] + err := s3.NewBucketNotExistsWaiter(resources.s3Actions.S3Client).Wait( + ctx, &s3.HeadBucketInput{Bucket: aws.String(bucket.name)}, time.Minute) + if err != nil { + log.Printf("Failed attempt to wait for bucket %s to be deleted.\n", bucket.name) + } else { + log.Printf("Deleted %s.\n", bucket.name) + } + } resources.demoBuckets = map[string]*DemoBucket{} } diff --git a/gov2/workflows/s3_object_lock/workflows/s3_object_lock.go b/gov2/workflows/s3_object_lock/workflows/s3_object_lock.go index 6232a1e57bc..86c4fdbd048 100644 --- a/gov2/workflows/s3_object_lock/workflows/s3_object_lock.go +++ b/gov2/workflows/s3_object_lock/workflows/s3_object_lock.go @@ -3,6 +3,8 @@ package workflows +// snippet-start:[gov2.workflows.s3.ObjectLock.scenario.complete] + import ( "context" "fmt" @@ -18,8 +20,6 @@ import ( "github.com/awsdocs/aws-doc-sdk-examples/gov2/demotools" ) -// snippet-start:[gov2.workflows.s3.ObjectLock.scenario.complete] - // ObjectLockScenario contains the steps to run the S3 Object Lock workflow. type ObjectLockScenario struct { questioner demotools.IQuestioner diff --git a/gov2/workflows/s3_object_lock/workflows/s3_object_lock_test.go b/gov2/workflows/s3_object_lock/workflows/s3_object_lock_test.go index b953bd9de08..6b23213ac20 100644 --- a/gov2/workflows/s3_object_lock/workflows/s3_object_lock_test.go +++ b/gov2/workflows/s3_object_lock/workflows/s3_object_lock_test.go @@ -104,9 +104,11 @@ func (scenTest *ObjectLockScenarioTest) SetupDataAndStubs() []testtools.Stub { // DeleteObject stubList = append(stubList, stubListAll()...) 
stubList = append(stubList, stubs.StubDeleteObject(standardBucket, *objVersions[0].Key, *objVersions[0].VersionId, false, nil)) + stubList = append(stubList, stubs.StubHeadObject(standardBucket, *objVersions[0].Key, &testtools.StubError{Err: &types.NotFound{}, ContinueAfter: true})) // DeleteRetentionObject stubList = append(stubList, stubListAll()...) stubList = append(stubList, stubs.StubDeleteObject(standardBucket, *objVersions[0].Key, *objVersions[0].VersionId, true, nil)) + stubList = append(stubList, stubs.StubHeadObject(standardBucket, *objVersions[0].Key, &testtools.StubError{Err: &types.NotFound{}, ContinueAfter: true})) // OverwriteObject stubList = append(stubList, stubListAll()...) stubList = append(stubList, stubs.StubPutObject(standardBucket, *objVersions[0].Key, &checksum, nil)) @@ -130,8 +132,16 @@ func (scenTest *ObjectLockScenarioTest) SetupDataAndStubs() []testtools.Stub { stubList = append(stubList, stubs.StubPutObjectLegalHold(bucket, *version.Key, *version.VersionId, types.ObjectLockLegalHoldStatusOff, nil)) } stubList = append(stubList, stubs.StubDeleteObjects(bucket, objVersions, info.name != "standard-bucket", nil)) + for _, ver := range objVersions { + stubList = append(stubList, stubs.StubHeadObject(bucket, *ver.Key, + &testtools.StubError{Err: &types.NotFound{}, ContinueAfter: true})) + } stubList = append(stubList, stubs.StubDeleteBucket(bucket, nil)) } + for _, info := range createInfo { + bucket := fmt.Sprintf("%s.%s", bucketPrefix, info.name) + stubList = append(stubList, stubs.StubHeadBucket(bucket, &testtools.StubError{Err: &types.NotFound{}, ContinueAfter: true})) + } return stubList }