|
83 | 83 | "transfer_spec.0.azure_blob_storage_data_source", |
84 | 84 | "transfer_spec.0.posix_data_source", |
85 | 85 | "transfer_spec.0.hdfs_data_source", |
| 86 | + "transfer_spec.0.aws_s3_compatible_data_source", |
86 | 87 | } |
87 | 88 | transferSpecDataSinkKeys = []string{ |
88 | 89 | "transfer_spec.0.gcs_data_sink", |
@@ -293,6 +294,14 @@ func ResourceStorageTransferJob() *schema.Resource { |
293 | 294 | ExactlyOneOf: transferSpecDataSourceKeys, |
294 | 295 | Description: `An HDFS Storage data source.`, |
295 | 296 | }, |
| 297 | + "aws_s3_compatible_data_source": { |
| 298 | + Type: schema.TypeList, |
| 299 | + Optional: true, |
| 300 | + MaxItems: 1, |
| 301 | + Elem: awsS3CompatibleDataSchema(), |
| 302 | + ExactlyOneOf: transferSpecDataSourceKeys, |
| 303 | + Description: `An AWS S3 Compatible data source.`, |
| 304 | + }, |
296 | 305 | }, |
297 | 306 | }, |
298 | 307 | Description: `Transfer specification.`, |
@@ -887,6 +896,69 @@ func azureBlobStorageDataSchema() *schema.Resource { |
887 | 896 | } |
888 | 897 | } |
889 | 898 |
|
// awsS3CompatibleDataSchema returns the nested schema for the
// aws_s3_compatible_data_source block of a transfer job's transfer_spec.
func awsS3CompatibleDataSchema() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"bucket_name": {
				Type:        schema.TypeString,
				Required:    true,
				Description: `Name of the bucket.`,
			},
			"path": {
				Type:        schema.TypeString,
				Optional:    true,
				Description: `Specifies the path to transfer objects.`,
			},
			"endpoint": {
				Type:        schema.TypeString,
				Required:    true,
				Description: `Endpoint of the storage service.`,
			},
			"region": {
				Type:        schema.TypeString,
				Optional:    true,
				Description: `Specifies the region to sign requests with. This can be left blank if requests should be signed with an empty region.`,
			},
			// Optional single-item block holding protocol/auth tuning for the
			// S3-compatible endpoint.
			"s3_metadata": {
				Type:        schema.TypeList,
				Optional:    true,
				MaxItems:    1,
				Description: `S3 compatible metadata.`,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"auth_method": {
							Type:         schema.TypeString,
							Optional:     true,
							ValidateFunc: verify.ValidateEnum([]string{"AUTH_METHOD_UNSPECIFIED", "AUTH_METHOD_AWS_SIGNATURE_V4", "AUTH_METHOD_AWS_SIGNATURE_V2"}),
							Description:  `Authentication and authorization method used by the storage service. When not specified, Transfer Service will attempt to determine right auth method to use.`,
						},
						"request_model": {
							Type:         schema.TypeString,
							Optional:     true,
							Default:      "REQUEST_MODEL_VIRTUAL_HOSTED_STYLE",
							ValidateFunc: verify.ValidateEnum([]string{"REQUEST_MODEL_VIRTUAL_HOSTED_STYLE", "REQUEST_MODEL_UNSPECIFIED", "REQUEST_MODEL_PATH_STYLE"}),
							Description:  `API request model used to call the storage service. When not specified, the default value of RequestModel REQUEST_MODEL_VIRTUAL_HOSTED_STYLE is used.`,
						},
						"protocol": {
							Type:         schema.TypeString,
							Optional:     true,
							Default:      "NETWORK_PROTOCOL_HTTPS",
							ValidateFunc: verify.ValidateEnum([]string{"NETWORK_PROTOCOL_UNSPECIFIED", "NETWORK_PROTOCOL_HTTPS", "NETWORK_PROTOCOL_HTTP"}),
							Description:  `The network protocol of the agent. When not specified, the default value of NetworkProtocol NETWORK_PROTOCOL_HTTPS is used.`,
						},
						"list_api": {
							Type:         schema.TypeString,
							Optional:     true,
							ValidateFunc: verify.ValidateEnum([]string{"LIST_API_UNSPECIFIED", "LIST_OBJECTS_V2", "LIST_OBJECTS"}),
							Description:  `The Listing API to use for discovering objects. When not specified, Transfer Service will attempt to determine the right API to use.`,
						},
					},
				},
			},
		},
	}
}
| 961 | + |
890 | 962 | func diffSuppressEmptyStartTimeOfDay(k, old, new string, d *schema.ResourceData) bool { |
891 | 963 | return k == "schedule.0.start_time_of_day.#" && old == "1" && new == "0" |
892 | 964 | } |
@@ -1501,6 +1573,73 @@ func flattenHdfsData(hdfsData *storagetransfer.HdfsData) []map[string]interface{ |
1501 | 1573 | return []map[string]interface{}{data} |
1502 | 1574 | } |
1503 | 1575 |
|
| 1576 | +func expandAwsS3CompatibleData(awsS3CompatibleDataSchema []interface{}) *storagetransfer.AwsS3CompatibleData { |
| 1577 | + if len(awsS3CompatibleDataSchema) == 0 || awsS3CompatibleDataSchema[0] == nil { |
| 1578 | + return nil |
| 1579 | + } |
| 1580 | + |
| 1581 | + awsS3CompatibleData := awsS3CompatibleDataSchema[0].(map[string]interface{}) |
| 1582 | + result := &storagetransfer.AwsS3CompatibleData{ |
| 1583 | + BucketName: awsS3CompatibleData["bucket_name"].(string), |
| 1584 | + Path: awsS3CompatibleData["path"].(string), |
| 1585 | + Endpoint: awsS3CompatibleData["endpoint"].(string), |
| 1586 | + Region: awsS3CompatibleData["region"].(string), |
| 1587 | + } |
| 1588 | + |
| 1589 | + if v, ok := awsS3CompatibleData["s3_metadata"].([]interface{}); ok { |
| 1590 | + result.S3Metadata = expandS3Metadata(v) |
| 1591 | + } |
| 1592 | + return result |
| 1593 | +} |
| 1594 | + |
| 1595 | +func expandS3Metadata(s3Metadata []interface{}) *storagetransfer.S3CompatibleMetadata { |
| 1596 | + if len(s3Metadata) == 0 || s3Metadata[0] == nil { |
| 1597 | + return nil |
| 1598 | + } |
| 1599 | + metadata := s3Metadata[0].(map[string]interface{}) |
| 1600 | + data := &storagetransfer.S3CompatibleMetadata{ |
| 1601 | + AuthMethod: metadata["auth_method"].(string), |
| 1602 | + ListApi: metadata["list_api"].(string), |
| 1603 | + RequestModel: metadata["request_model"].(string), |
| 1604 | + Protocol: metadata["protocol"].(string), |
| 1605 | + } |
| 1606 | + return data |
| 1607 | +} |
| 1608 | + |
| 1609 | +func flattenAwsS3CompatibleData(awsS3CompatibleData *storagetransfer.AwsS3CompatibleData, d *schema.ResourceData) []map[string]interface{} { |
| 1610 | + data := map[string]interface{}{ |
| 1611 | + "bucket_name": awsS3CompatibleData.BucketName, |
| 1612 | + "path": awsS3CompatibleData.Path, |
| 1613 | + "endpoint": awsS3CompatibleData.Endpoint, |
| 1614 | + "region": awsS3CompatibleData.Region, |
| 1615 | + } |
| 1616 | + if awsS3CompatibleData.S3Metadata != nil { |
| 1617 | + data["s3_metadata"] = flattenS3MetaData(awsS3CompatibleData.S3Metadata, d) |
| 1618 | + } |
| 1619 | + |
| 1620 | + return []map[string]interface{}{data} |
| 1621 | +} |
| 1622 | + |
| 1623 | +func flattenS3MetaData(s3MetaData *storagetransfer.S3CompatibleMetadata, d *schema.ResourceData) []map[string]interface{} { |
| 1624 | + s3Metadata := map[string]interface{}{ |
| 1625 | + "protocol": s3MetaData.Protocol, |
| 1626 | + "request_model": s3MetaData.RequestModel, |
| 1627 | + } |
| 1628 | + if d.Get("transfer_spec.0.aws_s3_compatible_data_source.0.s3_metadata.0.auth_method") == "AUTH_METHOD_UNSPECIFIED" { |
| 1629 | + s3Metadata["auth_method"] = d.Get("transfer_spec.0.aws_s3_compatible_data_source.0.s3_metadata.0.auth_method") |
| 1630 | + } else { |
| 1631 | + s3Metadata["auth_method"] = s3MetaData.AuthMethod |
| 1632 | + } |
| 1633 | + |
| 1634 | + if d.Get("transfer_spec.0.aws_s3_compatible_data_source.0.s3_metadata.0.list_api") == "LIST_API_UNSPECIFIED" { |
| 1635 | + s3Metadata["list_api"] = d.Get("transfer_spec.0.aws_s3_compatible_data_source.0.s3_metadata.0.list_api") |
| 1636 | + } else { |
| 1637 | + s3Metadata["list_api"] = s3MetaData.ListApi |
| 1638 | + } |
| 1639 | + |
| 1640 | + return []map[string]interface{}{s3Metadata} |
| 1641 | +} |
| 1642 | + |
1504 | 1643 | func expandAzureCredentials(azureCredentials []interface{}) *storagetransfer.AzureCredentials { |
1505 | 1644 | if len(azureCredentials) == 0 || azureCredentials[0] == nil { |
1506 | 1645 | return nil |
@@ -1665,6 +1804,7 @@ func expandTransferSpecs(transferSpecs []interface{}) *storagetransfer.TransferS |
1665 | 1804 | AzureBlobStorageDataSource: expandAzureBlobStorageData(transferSpec["azure_blob_storage_data_source"].([]interface{})), |
1666 | 1805 | PosixDataSource: expandPosixData(transferSpec["posix_data_source"].([]interface{})), |
1667 | 1806 | HdfsDataSource: expandHdfsData(transferSpec["hdfs_data_source"].([]interface{})), |
| 1807 | + AwsS3CompatibleDataSource: expandAwsS3CompatibleData(transferSpec["aws_s3_compatible_data_source"].([]interface{})), |
1668 | 1808 | } |
1669 | 1809 | } |
1670 | 1810 |
|
@@ -1705,6 +1845,8 @@ func flattenTransferSpec(transferSpec *storagetransfer.TransferSpec, d *schema.R |
1705 | 1845 | data["posix_data_source"] = flattenPosixData(transferSpec.PosixDataSource) |
1706 | 1846 | } else if transferSpec.HdfsDataSource != nil { |
1707 | 1847 | data["hdfs_data_source"] = flattenHdfsData(transferSpec.HdfsDataSource) |
| 1848 | + } else if transferSpec.AwsS3CompatibleDataSource != nil { |
| 1849 | + data["aws_s3_compatible_data_source"] = flattenAwsS3CompatibleData(transferSpec.AwsS3CompatibleDataSource, d) |
1708 | 1850 | } |
1709 | 1851 |
|
1710 | 1852 | return []map[string]interface{}{data} |
|
0 commit comments