Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions docs/config.rst
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,7 @@ Name Description
=========================== ================
anonymous Allow access to public S3 buckets without the need to provide AWS credentials. Any service that does not accept unsigned requests will return a service access error.
s3Acl Allow the setting of predefined bucket permissions, also known as *canned ACL*. Permitted values are ``Private``, ``PublicRead``, ``PublicReadWrite``, ``AuthenticatedRead``, ``LogDeliveryWrite``, ``BucketOwnerRead``, ``BucketOwnerFullControl`` and ``AwsExecRead``. See `Amazon docs <https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl>`_ for details.
s3ChecksumAlgorithm Automatically compute and store the checksum of S3 objects when uploading them, one of ``CRC32``, ``CRC32C``, ``SHA1``, ``SHA256`` (requires version ``23.02.0-edge`` or later).
connectionTimeout The amount of time to wait (in milliseconds) when initially establishing a connection before giving up and timing out.
endpoint The AWS S3 API entry point e.g. `s3-us-west-1.amazonaws.com`.
glacierAutoRetrieval Enable auto retrieval of S3 objects stored with Glacier class store (EXPERIMENTAL. default: ``false``, requires version ``22.12.0-edge`` or later).
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -945,6 +945,8 @@ else if (accessKey == null && secretKey == null) {

// set the client acl
client.setCannedAcl(getProp(props, "s_3_acl", "s3_acl", "s3Acl"));
// TODO: set checksum algorithm ?
// TODO: S3CopyObjectOperation setChecksumAlgorithm()
client.setStorageEncryption(props.getProperty("storage_encryption"));
client.setKmsKeyId(props.getProperty("storage_kms_key_id"));
client.setUploadChunkSize(props.getProperty("upload_chunk_size"));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,8 @@ import static nextflow.cloud.aws.util.AwsHelper.parseS3Acl
@CompileStatic
class AwsOptions implements CloudTransferOptions {

public static final List<String> VALID_CHECKSUM_ALGORITHMS = ['CRC32','CRC32C','SHA1','SHA256']

public static final List<String> VALID_RETRY_MODES = ['legacy','standard','adaptive']

public static final int DEFAULT_AWS_MAX_ATTEMPTS = 5
Expand Down Expand Up @@ -104,6 +106,11 @@ class AwsOptions implements CloudTransferOptions {
*/
CannedAccessControlList s3Acl

/**
* S3 checksum algorithm
*/
String s3ChecksumAlgorithm

/**
* @return A list of volume mounts using the docker cli convention ie. `/some/path` or `/some/path:/container/path` or `/some/path:/container/path:ro`
*/
Expand All @@ -120,6 +127,7 @@ class AwsOptions implements CloudTransferOptions {
AwsOptions(Session session) {
cliPath = getCliPath0(session)
s3Acl = parseS3Acl(session.config.navigate('aws.client.s3Acl') as String)
s3ChecksumAlgorithm = session.config.navigate('aws.client.s3ChecksumAlgorithm') as String
debug = session.config.navigate('aws.client.debug') as Boolean
storageClass = session.config.navigate('aws.client.uploadStorageClass') as String
storageKmsKeyId = session.config.navigate('aws.client.storageKmsKeyId') as String
Expand All @@ -136,6 +144,8 @@ class AwsOptions implements CloudTransferOptions {
retryMode = session.config.navigate('aws.batch.retryMode', 'standard')
shareIdentifier = session.config.navigate('aws.batch.shareIdentifier')
schedulingPriority = session.config.navigate('aws.batch.schedulingPriority', 0) as Integer
if( s3ChecksumAlgorithm && s3ChecksumAlgorithm !in VALID_CHECKSUM_ALGORITHMS )
log.warn "Unexpected value for 'aws.client.s3ChecksumAlgorithm' config setting - offending value: $s3ChecksumAlgorithm - valid values: ${VALID_CHECKSUM_ALGORITHMS.join(',')}"
if( retryMode == 'built-in' )
retryMode = null // this force falling back on NF built-in retry mode instead of delegating to AWS CLI tool
if( retryMode && retryMode !in VALID_RETRY_MODES )
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ class S3BashLib extends BashFunLib<S3BashLib> {
private String cli = 'aws'
private String retryMode
private String acl = ''
private String checksumAlgorithm = ''

S3BashLib withCliPath(String cliPath) {
if( cliPath )
Expand Down Expand Up @@ -77,6 +78,12 @@ class S3BashLib extends BashFunLib<S3BashLib> {
return this
}

S3BashLib withChecksumAlgorithm(String value) {
    // No algorithm configured — leave the option string untouched (empty by default)
    if( !value )
        return this
    // Rendered directly into the `aws s3 cp` command line; trailing space separates the next flag
    this.checksumAlgorithm = "--checksum-algorithm $value "
    return this
}

protected String retryEnv() {
if( !retryMode )
return ''
Expand All @@ -94,11 +101,11 @@ class S3BashLib extends BashFunLib<S3BashLib> {
local name=\$1
local s3path=\$2
if [[ "\$name" == - ]]; then
$cli s3 cp --only-show-errors ${debug}${acl}${storageEncryption}${storageKmsKeyId}--storage-class $storageClass - "\$s3path"
$cli s3 cp --only-show-errors ${debug}${acl}${checksumAlgorithm}${storageEncryption}${storageKmsKeyId}--storage-class $storageClass - "\$s3path"
elif [[ -d "\$name" ]]; then
$cli s3 cp --only-show-errors --recursive ${debug}${acl}${storageEncryption}${storageKmsKeyId}--storage-class $storageClass "\$name" "\$s3path/\$name"
$cli s3 cp --only-show-errors --recursive ${debug}${acl}${checksumAlgorithm}${storageEncryption}${storageKmsKeyId}--storage-class $storageClass "\$name" "\$s3path/\$name"
else
$cli s3 cp --only-show-errors ${debug}${acl}${storageEncryption}${storageKmsKeyId}--storage-class $storageClass "\$name" "\$s3path/\$name"
$cli s3 cp --only-show-errors ${debug}${acl}${checksumAlgorithm}${storageEncryption}${storageKmsKeyId}--storage-class $storageClass "\$name" "\$s3path/\$name"
fi
}

Expand Down Expand Up @@ -133,6 +140,7 @@ class S3BashLib extends BashFunLib<S3BashLib> {
.withRetryMode( opts.retryMode )
.withDebug( opts.debug )
.withAcl( opts.s3Acl )
.withChecksumAlgorithm( opts.s3ChecksumAlgorithm )
}

static String script(AwsOptions opts) {
Expand Down