Commit faa7118

feat: (storage) added field aws_s3_compatible_data_source for google_storage_transfer_job resource (#14835)
1 parent: 6b83964

4 files changed: 321 additions & 82 deletions

mmv1/third_party/terraform/services/storagetransfer/resource_storage_transfer_job.go

Lines changed: 142 additions & 0 deletions
@@ -83,6 +83,7 @@ var (
 		"transfer_spec.0.azure_blob_storage_data_source",
 		"transfer_spec.0.posix_data_source",
 		"transfer_spec.0.hdfs_data_source",
+		"transfer_spec.0.aws_s3_compatible_data_source",
 	}
 	transferSpecDataSinkKeys = []string{
 		"transfer_spec.0.gcs_data_sink",
@@ -293,6 +294,14 @@ func ResourceStorageTransferJob() *schema.Resource {
 					ExactlyOneOf: transferSpecDataSourceKeys,
 					Description:  `An HDFS Storage data source.`,
 				},
+				"aws_s3_compatible_data_source": {
+					Type:         schema.TypeList,
+					Optional:     true,
+					MaxItems:     1,
+					Elem:         awsS3CompatibleDataSchema(),
+					ExactlyOneOf: transferSpecDataSourceKeys,
+					Description:  `An AWS S3 Compatible data source.`,
+				},
 			},
 		},
 		Description: `Transfer specification.`,
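For orientation, here is a usage sketch of the block this schema defines, written in the style of the provider's Go acceptance tests (Terraform config as a raw string). The function name and all project, bucket, and endpoint values are hypothetical placeholders, and sibling settings such as the sink and schedule are reduced to a minimum; this config is not taken from the commit.

// Hypothetical acceptance-test config builder; every literal below is a placeholder.
func testAccStorageTransferJobConfig_awsS3Compatible() string {
	return `
resource "google_storage_transfer_job" "example" {
  description = "transfer from an S3-compatible store to GCS"

  transfer_spec {
    aws_s3_compatible_data_source {
      bucket_name = "source-bucket"       // required
      endpoint    = "storage.example.com" // required
      region      = "us-east-1"
      path        = "logs/"

      s3_metadata {
        auth_method   = "AUTH_METHOD_AWS_SIGNATURE_V4"
        request_model = "REQUEST_MODEL_VIRTUAL_HOSTED_STYLE" // also the schema default
        protocol      = "NETWORK_PROTOCOL_HTTPS"             // also the schema default
        list_api      = "LIST_OBJECTS_V2"
      }
    }

    gcs_data_sink {
      bucket_name = "destination-bucket"
    }
  }

  schedule {
    schedule_start_date {
      year  = 2023
      month = 6
      day   = 1
    }
  }
}
`
}

Because the new key is also appended to transferSpecDataSourceKeys above, ExactlyOneOf makes this block mutually exclusive with the other data sources (gcs, aws_s3, azure_blob_storage, posix, hdfs).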
@@ -887,6 +896,69 @@ func azureBlobStorageDataSchema() *schema.Resource {
 	}
 }
 
+func awsS3CompatibleDataSchema() *schema.Resource {
+	return &schema.Resource{
+		Schema: map[string]*schema.Schema{
+			"bucket_name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: `Name of the bucket.`,
+			},
+			"path": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: `Specifies the path to transfer objects.`,
+			},
+			"endpoint": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: `Endpoint of the storage service.`,
+			},
+			"region": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: `Specifies the region to sign requests with. This can be left blank if requests should be signed with an empty region.`,
+			},
+			"s3_metadata": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				MaxItems:    1,
+				Description: `S3 compatible metadata.`,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"auth_method": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							ValidateFunc: verify.ValidateEnum([]string{"AUTH_METHOD_UNSPECIFIED", "AUTH_METHOD_AWS_SIGNATURE_V4", "AUTH_METHOD_AWS_SIGNATURE_V2"}),
+							Description:  `Authentication and authorization method used by the storage service. When not specified, Transfer Service will attempt to determine the right auth method to use.`,
+						},
+						"request_model": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							Default:      "REQUEST_MODEL_VIRTUAL_HOSTED_STYLE",
+							ValidateFunc: verify.ValidateEnum([]string{"REQUEST_MODEL_VIRTUAL_HOSTED_STYLE", "REQUEST_MODEL_UNSPECIFIED", "REQUEST_MODEL_PATH_STYLE"}),
+							Description:  `API request model used to call the storage service. When not specified, the default value of RequestModel REQUEST_MODEL_VIRTUAL_HOSTED_STYLE is used.`,
+						},
+						"protocol": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							Default:      "NETWORK_PROTOCOL_HTTPS",
+							ValidateFunc: verify.ValidateEnum([]string{"NETWORK_PROTOCOL_UNSPECIFIED", "NETWORK_PROTOCOL_HTTPS", "NETWORK_PROTOCOL_HTTP"}),
+							Description:  `The network protocol of the agent. When not specified, the default value of NetworkProtocol NETWORK_PROTOCOL_HTTPS is used.`,
+						},
+						"list_api": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							ValidateFunc: verify.ValidateEnum([]string{"LIST_API_UNSPECIFIED", "LIST_OBJECTS_V2", "LIST_OBJECTS"}),
+							Description:  `The Listing API to use for discovering objects. When not specified, Transfer Service will attempt to determine the right API to use.`,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
 func diffSuppressEmptyStartTimeOfDay(k, old, new string, d *schema.ResourceData) bool {
 	return k == "schedule.0.start_time_of_day.#" && old == "1" && new == "0"
 }
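The enum checks above use the provider's verify helper. To my understanding, verify.ValidateEnum is a thin wrapper over the plugin SDK's StringInSlice validator; a minimal equivalent sketch (illustrative, not the provider's actual implementation):

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// validateEnum is a hypothetical stand-in for verify.ValidateEnum: it rejects
// any value outside the allowed set (case-sensitive comparison).
func validateEnum(values []string) schema.SchemaValidateFunc {
	return validation.StringInSlice(values, false)
}

Note also that request_model and protocol carry Defaults, so Terraform always records a concrete value for them in state even when the user omits the fields, whereas auth_method and list_api default to the empty string.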
@@ -1501,6 +1573,73 @@ func flattenHdfsData(hdfsData *storagetransfer.HdfsData) []map[string]interface{
 	return []map[string]interface{}{data}
 }
 
+func expandAwsS3CompatibleData(awsS3CompatibleDataSchema []interface{}) *storagetransfer.AwsS3CompatibleData {
+	if len(awsS3CompatibleDataSchema) == 0 || awsS3CompatibleDataSchema[0] == nil {
+		return nil
+	}
+
+	awsS3CompatibleData := awsS3CompatibleDataSchema[0].(map[string]interface{})
+	result := &storagetransfer.AwsS3CompatibleData{
+		BucketName: awsS3CompatibleData["bucket_name"].(string),
+		Path:       awsS3CompatibleData["path"].(string),
+		Endpoint:   awsS3CompatibleData["endpoint"].(string),
+		Region:     awsS3CompatibleData["region"].(string),
+	}
+
+	if v, ok := awsS3CompatibleData["s3_metadata"].([]interface{}); ok {
+		result.S3Metadata = expandS3Metadata(v)
+	}
+	return result
+}
+
+func expandS3Metadata(s3Metadata []interface{}) *storagetransfer.S3CompatibleMetadata {
+	if len(s3Metadata) == 0 || s3Metadata[0] == nil {
+		return nil
+	}
+	metadata := s3Metadata[0].(map[string]interface{})
+	data := &storagetransfer.S3CompatibleMetadata{
+		AuthMethod:   metadata["auth_method"].(string),
+		ListApi:      metadata["list_api"].(string),
+		RequestModel: metadata["request_model"].(string),
+		Protocol:     metadata["protocol"].(string),
+	}
+	return data
+}
+
+func flattenAwsS3CompatibleData(awsS3CompatibleData *storagetransfer.AwsS3CompatibleData, d *schema.ResourceData) []map[string]interface{} {
+	data := map[string]interface{}{
+		"bucket_name": awsS3CompatibleData.BucketName,
+		"path":        awsS3CompatibleData.Path,
+		"endpoint":    awsS3CompatibleData.Endpoint,
+		"region":      awsS3CompatibleData.Region,
+	}
+	if awsS3CompatibleData.S3Metadata != nil {
+		data["s3_metadata"] = flattenS3MetaData(awsS3CompatibleData.S3Metadata, d)
+	}
+
+	return []map[string]interface{}{data}
+}
+
+func flattenS3MetaData(s3MetaData *storagetransfer.S3CompatibleMetadata, d *schema.ResourceData) []map[string]interface{} {
+	s3Metadata := map[string]interface{}{
+		"protocol":      s3MetaData.Protocol,
+		"request_model": s3MetaData.RequestModel,
+	}
+	if d.Get("transfer_spec.0.aws_s3_compatible_data_source.0.s3_metadata.0.auth_method") == "AUTH_METHOD_UNSPECIFIED" {
+		s3Metadata["auth_method"] = d.Get("transfer_spec.0.aws_s3_compatible_data_source.0.s3_metadata.0.auth_method")
+	} else {
+		s3Metadata["auth_method"] = s3MetaData.AuthMethod
+	}
+
+	if d.Get("transfer_spec.0.aws_s3_compatible_data_source.0.s3_metadata.0.list_api") == "LIST_API_UNSPECIFIED" {
+		s3Metadata["list_api"] = d.Get("transfer_spec.0.aws_s3_compatible_data_source.0.s3_metadata.0.list_api")
+	} else {
+		s3Metadata["list_api"] = s3MetaData.ListApi
+	}
+
+	return []map[string]interface{}{s3Metadata}
+}
+
 func expandAzureCredentials(azureCredentials []interface{}) *storagetransfer.AzureCredentials {
 	if len(azureCredentials) == 0 || azureCredentials[0] == nil {
 		return nil
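A hedged unit-test sketch of the expand path above, showing the shape Terraform hands these functions: a one-element []interface{} holding a map, with the nested MaxItems:1 block as another one-element list. The test name and literal values are hypothetical, and the test assumes it lives in the same package as the functions. Note the expanders use unchecked type assertions, so every key must be present in the map; the schema guarantees that in practice by zero-filling omitted fields.

import "testing"

func TestExpandAwsS3CompatibleData_sketch(t *testing.T) {
	// Terraform represents a MaxItems:1 block as a single-element list of maps.
	raw := []interface{}{map[string]interface{}{
		"bucket_name": "source-bucket",
		"path":        "logs/",
		"endpoint":    "storage.example.com",
		"region":      "us-east-1",
		"s3_metadata": []interface{}{map[string]interface{}{
			"auth_method":   "AUTH_METHOD_AWS_SIGNATURE_V4",
			"request_model": "REQUEST_MODEL_PATH_STYLE",
			"protocol":      "NETWORK_PROTOCOL_HTTPS",
			"list_api":      "LIST_OBJECTS_V2",
		}},
	}}

	got := expandAwsS3CompatibleData(raw)
	if got == nil || got.BucketName != "source-bucket" || got.Endpoint != "storage.example.com" {
		t.Fatalf("unexpected expand result: %+v", got)
	}
	if got.S3Metadata == nil || got.S3Metadata.AuthMethod != "AUTH_METHOD_AWS_SIGNATURE_V4" {
		t.Fatalf("unexpected s3_metadata: %+v", got.S3Metadata)
	}
}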
@@ -1665,6 +1804,7 @@ func expandTransferSpecs(transferSpecs []interface{}) *storagetransfer.TransferS
 		AzureBlobStorageDataSource: expandAzureBlobStorageData(transferSpec["azure_blob_storage_data_source"].([]interface{})),
 		PosixDataSource:            expandPosixData(transferSpec["posix_data_source"].([]interface{})),
 		HdfsDataSource:             expandHdfsData(transferSpec["hdfs_data_source"].([]interface{})),
+		AwsS3CompatibleDataSource:  expandAwsS3CompatibleData(transferSpec["aws_s3_compatible_data_source"].([]interface{})),
 	}
 }

@@ -1705,6 +1845,8 @@ func flattenTransferSpec(transferSpec *storagetransfer.TransferSpec, d *schema.R
 		data["posix_data_source"] = flattenPosixData(transferSpec.PosixDataSource)
 	} else if transferSpec.HdfsDataSource != nil {
 		data["hdfs_data_source"] = flattenHdfsData(transferSpec.HdfsDataSource)
+	} else if transferSpec.AwsS3CompatibleDataSource != nil {
+		data["aws_s3_compatible_data_source"] = flattenAwsS3CompatibleData(transferSpec.AwsS3CompatibleDataSource, d)
 	}
 
 	return []map[string]interface{}{data}
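One subtlety worth calling out: in flattenS3MetaData, auth_method and list_api are read back from configuration whenever the configured value is the *_UNSPECIFIED sentinel. The apparent intent (my reading, not stated in the commit) is that the API does not echo UNSPECIFIED values back, so flattening the response directly would produce a permanent plan diff. The guard reduces to this pattern (helper name hypothetical):

import "strings"

// keepIfUnspecified returns the configured value when it is an
// "*_UNSPECIFIED" sentinel, otherwise the value reported by the API.
func keepIfUnspecified(configured, fromAPI string) string {
	if strings.HasSuffix(configured, "_UNSPECIFIED") {
		return configured
	}
	return fromAPI
}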

mmv1/third_party/terraform/services/storagetransfer/resource_storage_transfer_job_meta.yaml

Lines changed: 8 additions & 0 deletions
@@ -57,6 +57,14 @@ fields:
   - field: 'transfer_spec.azure_blob_storage_data_source.federated_identity_config.tenant_id'
   - field: 'transfer_spec.azure_blob_storage_data_source.path'
   - field: 'transfer_spec.azure_blob_storage_data_source.storage_account'
+  - field: 'transfer_spec.aws_s3_compatible_data_source.bucket_name'
+  - field: 'transfer_spec.aws_s3_compatible_data_source.path'
+  - field: 'transfer_spec.aws_s3_compatible_data_source.region'
+  - field: 'transfer_spec.aws_s3_compatible_data_source.endpoint'
+  - field: 'transfer_spec.aws_s3_compatible_data_source.s3_metadata.request_model'
+  - field: 'transfer_spec.aws_s3_compatible_data_source.s3_metadata.protocol'
+  - field: 'transfer_spec.aws_s3_compatible_data_source.s3_metadata.list_api'
+  - field: 'transfer_spec.aws_s3_compatible_data_source.s3_metadata.auth_method'
   - field: 'transfer_spec.gcs_data_sink.bucket_name'
   - field: 'transfer_spec.gcs_data_sink.path'
   - field: 'transfer_spec.gcs_data_source.bucket_name'
