
Commit 454cce0

Add actions.save_findings.output_config.storage_path field to google_data_loss_prevention_job_trigger. (#15263) (#10816)
[upstream:5de0ca90b3e3a57431f94b5a20c6c8d3456f238b] Signed-off-by: Modular Magician <[email protected]>
1 parent a73b8c8 · commit 454cce0

5 files changed: +217 −15 lines

.changelog/15263.txt

Lines changed: 3 additions & 0 deletions
````diff
@@ -0,0 +1,3 @@
+```release-note:enhancement
+dlp: added `actions.save_findings.output_config.storage_path` field to `google_data_loss_prevention_job_trigger` resource
+```
````
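For orientation, a minimal trigger using the new block looks like the following — a sketch adapted from the acceptance test added later in this commit; the project and bucket names are placeholders:

```hcl
resource "google_data_loss_prevention_job_trigger" "example" {
  parent       = "projects/my-project"
  display_name = "display"

  triggers {
    schedule {
      recurrence_period_duration = "86400s"
    }
  }

  inspect_job {
    inspect_template_name = "fake"

    actions {
      save_findings {
        output_config {
          # New in this commit: write findings to Cloud Storage
          # instead of a BigQuery table.
          storage_path {
            path = "gs://my-bucket/save-path/"
          }
        }
      }
    }

    storage_config {
      cloud_storage_options {
        file_set {
          url = "gs://my-bucket/directory/"
        }
        file_types = ["CSV", "TSV"]
      }
    }
  }
}
```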

google-beta/services/datalossprevention/resource_data_loss_prevention_job_trigger.go

Lines changed: 87 additions & 14 deletions
```diff
@@ -670,9 +670,43 @@ is 1,024 characters.`,
 						MaxItems: 1,
 						Elem: &schema.Resource{
 							Schema: map[string]*schema.Schema{
+								"output_schema": {
+									Type:         schema.TypeString,
+									Optional:     true,
+									ValidateFunc: verify.ValidateEnum([]string{"BASIC_COLUMNS", "GCS_COLUMNS", "DATASTORE_COLUMNS", "BIG_QUERY_COLUMNS", "ALL_COLUMNS", ""}),
+									Description: `Schema used for writing the findings for Inspect jobs. This field is only used for
+Inspect and must be unspecified for Risk jobs. Columns are derived from the Finding
+object. If appending to an existing table, any columns from the predefined schema
+that are missing will be added. No columns in the existing table will be deleted.
+
+If unspecified, then all available columns will be used for a new table or an (existing)
+table with no schema, and no changes will be made to an existing table that has a schema.
+Only for use with external storage. Possible values: ["BASIC_COLUMNS", "GCS_COLUMNS", "DATASTORE_COLUMNS", "BIG_QUERY_COLUMNS", "ALL_COLUMNS"]`,
+								},
+								"storage_path": {
+									Type:     schema.TypeList,
+									Optional: true,
+									Description: `Store findings in an existing Cloud Storage bucket. Files will be generated with the job ID and file part number
+as the filename, and will contain findings in textproto format as SaveToGcsFindingsOutput. The file name will use
+the naming convention <job_id>-<shard_number>, for example: my-job-id-2.
+
+Supported for InspectJobs. The bucket must not be the same as the bucket being inspected. If storing findings to
+Cloud Storage, the output schema field should not be set. If set, it will be ignored.`,
+									MaxItems: 1,
+									Elem: &schema.Resource{
+										Schema: map[string]*schema.Schema{
+											"path": {
+												Type:     schema.TypeString,
+												Required: true,
+												Description: `A URL representing a file or path (no wildcards) in Cloud Storage.
+Example: 'gs://[BUCKET_NAME]/dictionary.txt'`,
+											},
+										},
+									},
+								},
 								"table": {
 									Type:     schema.TypeList,
-									Required: true,
+									Optional: true,
 									Description: `Information on the location of the target BigQuery Table.`,
 									MaxItems: 1,
 									Elem: &schema.Resource{
@@ -696,19 +730,6 @@ is 1,024 characters.`,
 									},
 								},
 							},
-							"output_schema": {
-								Type:         schema.TypeString,
-								Optional:     true,
-								ValidateFunc: verify.ValidateEnum([]string{"BASIC_COLUMNS", "GCS_COLUMNS", "DATASTORE_COLUMNS", "BIG_QUERY_COLUMNS", "ALL_COLUMNS", ""}),
-								Description: `Schema used for writing the findings for Inspect jobs. This field is only used for
-Inspect and must be unspecified for Risk jobs. Columns are derived from the Finding
-object. If appending to an existing table, any columns from the predefined schema
-that are missing will be added. No columns in the existing table will be deleted.
-
-If unspecified, then all available columns will be used for a new table or an (existing)
-table with no schema, and no changes will be made to an existing table that has a schema.
-Only for use with external storage. Possible values: ["BASIC_COLUMNS", "GCS_COLUMNS", "DATASTORE_COLUMNS", "BIG_QUERY_COLUMNS", "ALL_COLUMNS"]`,
-							},
 						},
 					},
 				},
```
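Note the companion change in the hunk above: `table` drops from Required to Optional, since `storage_path` is now an alternative destination. The schema itself does not enforce choosing exactly one of the two; presumably the DLP API rejects a config that sets neither or both. For contrast, the pre-existing BigQuery destination looks like the sketch below (only the action is shown; the surrounding trigger matches the earlier example, and the project and dataset names are placeholders):

```hcl
save_findings {
  output_config {
    table {
      project_id = "my-project"
      dataset_id = "dlp_findings"
      # table_id omitted: per the docs, a name like
      # dlp_googleapis_yyyy_mm_dd_[dlp_job_id] is generated.
    }
  }
}
```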
```diff
@@ -3245,6 +3266,8 @@ func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfi
 	transformed := make(map[string]interface{})
 	transformed["table"] =
 		flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(original["table"], d, config)
+	transformed["storage_path"] =
+		flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigStoragePath(original["storagePath"], d, config)
 	transformed["output_schema"] =
 		flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(original["outputSchema"], d, config)
 	return []interface{}{transformed}
@@ -3278,6 +3301,23 @@ func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfi
 	return v
 }
 
+func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigStoragePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["path"] =
+		flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigStoragePathPath(original["path"], d, config)
+	return []interface{}{transformed}
+}
+func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigStoragePathPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
 func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	return v
 }
@@ -5686,6 +5726,13 @@ func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig
 		transformed["table"] = transformedTable
 	}
 
+	transformedStoragePath, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigStoragePath(original["storage_path"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedStoragePath); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["storagePath"] = transformedStoragePath
+	}
+
 	transformedOutputSchema, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(original["output_schema"], d, config)
 	if err != nil {
 		return nil, err
@@ -5744,6 +5791,32 @@ func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig
 	return v, nil
 }
 
+func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigStoragePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	if v == nil {
+		return nil, nil
+	}
+	l := v.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil, nil
+	}
+	raw := l[0]
+	original := raw.(map[string]interface{})
+	transformed := make(map[string]interface{})
+
+	transformedPath, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigStoragePathPath(original["path"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["path"] = transformedPath
+	}
+
+	return transformed, nil
+}
+
+func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigStoragePathPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
 func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
 	return v, nil
 }
```

google-beta/services/datalossprevention/resource_data_loss_prevention_job_trigger_generated_meta.yaml

Lines changed: 1 addition & 0 deletions
```diff
@@ -22,6 +22,7 @@ fields:
   - field: 'inspect_job.actions.publish_summary_to_cscc'
   - field: 'inspect_job.actions.publish_to_stackdriver'
   - field: 'inspect_job.actions.save_findings.output_config.output_schema'
+  - field: 'inspect_job.actions.save_findings.output_config.storage_path.path'
   - field: 'inspect_job.actions.save_findings.output_config.table.dataset_id'
   - field: 'inspect_job.actions.save_findings.output_config.table.project_id'
   - field: 'inspect_job.actions.save_findings.output_config.table.table_id'
```

google-beta/services/datalossprevention/resource_data_loss_prevention_job_trigger_test.go

Lines changed: 109 additions & 0 deletions
```diff
@@ -529,6 +529,41 @@ func TestAccDataLossPreventionJobTrigger_dlpJobTriggerCreateWithTimespanConfigBi
 	})
 }
 
+func TestAccDataLossPreventionJobTrigger_dlpJobTriggerSaveToCloudStorage(t *testing.T) {
+	t.Parallel()
+
+	context := map[string]interface{}{
+		"project":       envvar.GetTestProjectFromEnv(),
+		"random_suffix": acctest.RandString(t, 10),
+	}
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy:             testAccCheckDataLossPreventionJobTriggerDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccDataLossPreventionJobTrigger_inspectUpdateSaveToCloudStorage(context),
+			},
+			{
+				ResourceName:            "google_data_loss_prevention_job_trigger.basic",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"parent"},
+			},
+			{
+				Config: testAccDataLossPreventionJobTrigger_inspectUpdateSaveToCloudStorageUpdate(context),
+			},
+			{
+				ResourceName:            "google_data_loss_prevention_job_trigger.basic",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"parent"},
+			},
+		},
+	})
+}
+
 func testAccDataLossPreventionJobTrigger_dlpJobTriggerBasic(context map[string]interface{}) string {
 	return acctest.Nprintf(`
 resource "google_data_loss_prevention_job_trigger" "basic" {
@@ -2843,3 +2878,77 @@ resource "google_data_loss_prevention_job_trigger" "bigquery_row_limit_timespan"
 }
 `, context)
 }
+
+func testAccDataLossPreventionJobTrigger_inspectUpdateSaveToCloudStorage(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_data_loss_prevention_job_trigger" "basic" {
+	parent       = "projects/%{project}"
+	description  = "Starting description"
+	display_name = "display"
+
+	triggers {
+		schedule {
+			recurrence_period_duration = "86400s"
+		}
+	}
+
+	inspect_job {
+		inspect_template_name = "fake"
+		actions {
+			save_findings {
+				output_config {
+					storage_path {
+						path = "gs://mybucket/save-path/"
+					}
+				}
+			}
+		}
+		storage_config {
+			cloud_storage_options {
+				file_set {
+					url = "gs://mybucket/directory/"
+				}
+				file_types = ["POWERPOINT", "EXCEL", "CSV", "TSV"]
+			}
+		}
+	}
+}
+`, context)
+}
+
+func testAccDataLossPreventionJobTrigger_inspectUpdateSaveToCloudStorageUpdate(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_data_loss_prevention_job_trigger" "basic" {
+	parent       = "projects/%{project}"
+	description  = "Starting description"
+	display_name = "display"
+
+	triggers {
+		schedule {
+			recurrence_period_duration = "86400s"
+		}
+	}
+
+	inspect_job {
+		inspect_template_name = "fake"
+		actions {
+			save_findings {
+				output_config {
+					storage_path {
+						path = "gs://mybucket/save-path-updated/"
+					}
+				}
+			}
+		}
+		storage_config {
+			cloud_storage_options {
+				file_set {
+					url = "gs://mybucket/directory/"
+				}
+				file_types = ["POWERPOINT", "EXCEL", "CSV", "TSV"]
+			}
+		}
+	}
+}
+`, context)
+}
```

website/docs/r/data_loss_prevention_job_trigger.html.markdown

Lines changed: 17 additions & 1 deletion
```diff
@@ -1513,10 +1513,19 @@ The following arguments are supported:
 <a name="nested_inspect_job_actions_actions_save_findings_output_config"></a>The `output_config` block supports:
 
 * `table` -
-  (Required)
+  (Optional)
   Information on the location of the target BigQuery Table.
   Structure is [documented below](#nested_inspect_job_actions_actions_save_findings_output_config_table).
 
+* `storage_path` -
+  (Optional)
+  Store findings in an existing Cloud Storage bucket. Files will be generated with the job ID and file part number
+  as the filename, and will contain findings in textproto format as SaveToGcsFindingsOutput. The file name will use
+  the naming convention <job_id>-<shard_number>, for example: my-job-id-2.
+  Supported for InspectJobs. The bucket must not be the same as the bucket being inspected. If storing findings to
+  Cloud Storage, the output schema field should not be set. If set, it will be ignored.
+  Structure is [documented below](#nested_inspect_job_actions_actions_save_findings_output_config_storage_path).
+
 * `output_schema` -
   (Optional)
   Schema used for writing the findings for Inspect jobs. This field is only used for
@@ -1544,6 +1553,13 @@ The following arguments are supported:
   Name of the table. If is not set a new one will be generated for you with the following format:
   `dlp_googleapis_yyyy_mm_dd_[dlp_job_id]`. Pacific timezone will be used for generating the date details.
 
+<a name="nested_inspect_job_actions_actions_save_findings_output_config_storage_path"></a>The `storage_path` block supports:
+
+* `path` -
+  (Required)
+  A URL representing a file or path (no wildcards) in Cloud Storage.
+  Example: `gs://[BUCKET_NAME]/dictionary.txt`
+
 <a name="nested_inspect_job_actions_actions_pub_sub"></a>The `pub_sub` block supports:
 
 * `topic` -
```
