From 5d0952e14f57b2e1a99f6be954af0d43b64ccc9e Mon Sep 17 00:00:00 2001
From: Phillip Jones
Date: Fri, 7 Mar 2025 19:05:33 -0800
Subject: [PATCH] Added tested S3-compatible providers for Super Slurper

---
 .../docs/r2/data-migration/super-slurper.mdx | 60 +++++++++++--------
 1 file changed, 35 insertions(+), 25 deletions(-)

diff --git a/src/content/docs/r2/data-migration/super-slurper.mdx b/src/content/docs/r2/data-migration/super-slurper.mdx
index fed96c92ddfafe..5e472f0a4578a5 100644
--- a/src/content/docs/r2/data-migration/super-slurper.mdx
+++ b/src/content/docs/r2/data-migration/super-slurper.mdx
@@ -6,18 +6,17 @@ learning_center:
   link: https://www.cloudflare.com/learning/cloud/what-is-data-migration/
 sidebar:
   order: 1
-
 ---
 
-import { InlineBadge, Render } from "~/components"
+import { InlineBadge, Render } from "~/components";
 
 Super Slurper allows you to quickly and easily copy objects from other cloud providers to an R2 bucket of your choice.
 
 Migration jobs:
 
-* Preserve custom object metadata from source bucket by copying them on the migrated objects on R2.
-* Do not delete any objects from source bucket.
-* Use TLS encryption over HTTPS connections for safe and private object transfers.
+- Preserve custom object metadata from the source bucket by copying it onto the migrated objects in R2.
+- Do not delete any objects from the source bucket.
+- Use TLS-encrypted HTTPS connections for safe and private object transfers.
 
 ## When to use Super Slurper
 
@@ -52,10 +51,27 @@ This setting determines what happens when an object being copied from the source
 
 Cloudflare currently supports copying data from the following cloud object storage providers to R2:
 
-* Amazon S3
-* Cloudflare R2
-* Google Cloud Storage (GCS)
-* All S3-compatible storage providers
+- Amazon S3
+- Cloudflare R2
+- Google Cloud Storage (GCS)
+- All S3-compatible storage providers
+
+### Tested S3-compatible storage providers
+
+The following S3-compatible storage providers have been tested and verified to work with Super Slurper:
+
+- Backblaze B2
+- DigitalOcean Spaces
+- Scaleway Object Storage
+- Wasabi Cloud Object Storage
+
+Super Slurper should support transfers from any S3-compatible storage provider, but only the providers listed above have been explicitly tested.
+
+:::note
+
+Have you tested and verified another S3-compatible provider? [Open a pull request](https://github.com/cloudflare/cloudflare-docs/edit/production/src/content/docs/r2/data-migration/super-slurper.mdx) or [create a GitHub issue](https://github.com/cloudflare/cloudflare-docs/issues/new).
+
+:::
 
 ## Create credentials for storage providers
 
@@ -70,20 +86,14 @@ To create credentials with the correct permissions:
 
 ```json
 {
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:Get*",
-        "s3:List*"
-      ],
-      "Resource": [
-        "arn:aws:s3:::",
-        "arn:aws:s3:::/*"
-      ]
-    }
-  ]
+	"Version": "2012-10-17",
+	"Statement": [
+		{
+			"Effect": "Allow",
+			"Action": ["s3:Get*", "s3:List*"],
+			"Resource": ["arn:aws:s3:::", "arn:aws:s3:::/*"]
+		}
+	]
 }
 ```
 
@@ -124,5 +134,5 @@ You can now use this JSON key file when enabling Super Slurper.
 
 Objects stored using AWS S3 [archival storage classes](https://aws.amazon.com/s3/storage-classes/#Archive) will be skipped and need to be copied separately. Specifically:
 
-* Files stored using S3 Glacier tiers (not including Glacier Instant Retrieval) will be skipped and logged in the migration log.
-* Files stored using S3 Intelligent Tiering and placed in Deep Archive tier will be skipped and logged in the migration log.
+- Files stored using S3 Glacier tiers (not including Glacier Instant Retrieval) will be skipped and logged in the migration log.
+- Files stored using S3 Intelligent Tiering and placed in Deep Archive tier will be skipped and logged in the migration log.