
Commit 4fa3252

Merge pull request #43302 from gilandose/f-aws_emr_serverless_application-application_configuration
feat: r/aws_emrserverless_application add runtime_configuration as an optional argument
2 parents 2203a42 + 922295a commit 4fa3252

4 files changed: +240 -7 lines changed

.changelog/43302.txt

Lines changed: 3 additions & 0 deletions
````diff
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/aws_emrserverless_application: Add `runtime_configuration` argument
+```
````

internal/service/emrserverless/application.go

Lines changed: 88 additions & 7 deletions
```diff
@@ -229,6 +229,23 @@ func resourceApplication() *schema.Resource {
         Type:     schema.TypeString,
         Required: true,
       },
+      "runtime_configuration": {
+        Type:     schema.TypeList,
+        Optional: true,
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "classification": {
+              Type:     schema.TypeString,
+              Required: true,
+            },
+            names.AttrProperties: {
+              Type:     schema.TypeMap,
+              Optional: true,
+              Elem:     &schema.Schema{Type: schema.TypeString},
+            },
+          },
+        },
+      },
       "scheduler_configuration": {
         Type:     schema.TypeList,
         Optional: true,
```
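
For orientation (not part of the commit): with the `TypeList` schema above, the plugin SDK hands the provider each configured `runtime_configuration` block as a `map[string]any` element of a `[]any`. A minimal sketch of that shape, with illustrative values only:

```go
package main

import "fmt"

func main() {
  // Illustrative only: the value a call like d.GetOk("runtime_configuration")
  // would yield under the schema added above -- a []any containing one
  // map[string]any per configured block.
  runtimeConfiguration := []any{
    map[string]any{
      "classification": "spark-defaults",
      "properties": map[string]any{
        "spark.driver.cores":  "2",
        "spark.driver.memory": "4G",
      },
    },
    map[string]any{
      "classification": "spark-driver-log4j2",
      "properties":     map[string]any{},
    },
  }

  fmt.Printf("%d block(s): %v\n", len(runtimeConfiguration), runtimeConfiguration)
}
```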
```diff
@@ -269,7 +286,7 @@ func resourceApplicationCreate(ctx context.Context, d *schema.ResourceData, meta
   conn := meta.(*conns.AWSClient).EMRServerlessClient(ctx)
 
   name := d.Get(names.AttrName).(string)
-  input := &emrserverless.CreateApplicationInput{
+  input := emrserverless.CreateApplicationInput{
     ClientToken:  aws.String(id.UniqueId()),
     ReleaseLabel: aws.String(d.Get("release_label").(string)),
     Name:         aws.String(name),
@@ -309,13 +326,16 @@ func resourceApplicationCreate(ctx context.Context, d *schema.ResourceData, meta
     input.NetworkConfiguration = expandNetworkConfiguration(v.([]any)[0].(map[string]any))
   }
 
+  if v, ok := d.GetOk("runtime_configuration"); ok && len(v.([]any)) > 0 {
+    input.RuntimeConfiguration = expandRuntimeConfiguration(v.([]any))
+  }
+
   // Empty block (len(v.([]any)) > 0 but v.([]any)[0] == nil) is allowed to enable scheduler_configuration with default values
   if v, ok := d.GetOk("scheduler_configuration"); ok && len(v.([]any)) > 0 {
     input.SchedulerConfiguration = expandSchedulerConfiguration(v.([]any))
   }
 
-  output, err := conn.CreateApplication(ctx, input)
-
+  output, err := conn.CreateApplication(ctx, &input)
   if err != nil {
     return sdkdiag.AppendErrorf(diags, "creating EMR Serveless Application (%s): %s", name, err)
   }
```
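
The create path now builds the input as a value struct and passes its address to `CreateApplication`, with `RuntimeConfiguration` attached when the block is set. A rough, self-contained sketch of the request this produces, using the AWS SDK for Go v2 types the diff relies on (field values are illustrative, not taken from the commit, and no API call is made):

```go
package main

import (
  "fmt"

  "github.com/aws/aws-sdk-go-v2/aws"
  "github.com/aws/aws-sdk-go-v2/service/emrserverless"
  "github.com/aws/aws-sdk-go-v2/service/emrserverless/types"
)

func main() {
  // Sketch of the request the create path now assembles: a value struct whose
  // address is later passed as conn.CreateApplication(ctx, &input).
  input := emrserverless.CreateApplicationInput{
    Name:         aws.String("example"),
    ReleaseLabel: aws.String("emr-6.8.0"),
    Type:         aws.String("spark"),
    RuntimeConfiguration: []types.Configuration{
      {
        Classification: aws.String("spark-defaults"),
        Properties: map[string]string{
          "spark.driver.cores":  "2",
          "spark.driver.memory": "4G",
        },
      },
    },
  }

  fmt.Println(aws.ToString(input.Name), len(input.RuntimeConfiguration))
}
```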
```diff
@@ -379,6 +399,10 @@ func resourceApplicationRead(ctx context.Context, d *schema.ResourceData, meta a
     return sdkdiag.AppendErrorf(diags, "setting network_configuration: %s", err)
   }
 
+  if err := d.Set("runtime_configuration", flattenRuntimeConfiguration(application.RuntimeConfiguration)); err != nil {
+    return sdkdiag.AppendErrorf(diags, "setting runtime_configuration: %s", err)
+  }
+
   if err := d.Set("scheduler_configuration", flattenSchedulerConfiguration(application.SchedulerConfiguration)); err != nil {
     return sdkdiag.AppendErrorf(diags, "setting scheduler_configuration: %s", err)
   }
```
```diff
@@ -393,7 +417,7 @@ func resourceApplicationUpdate(ctx context.Context, d *schema.ResourceData, meta
   conn := meta.(*conns.AWSClient).EMRServerlessClient(ctx)
 
   if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) {
-    input := &emrserverless.UpdateApplicationInput{
+    input := emrserverless.UpdateApplicationInput{
       ApplicationId: aws.String(d.Id()),
       ClientToken:   aws.String(id.UniqueId()),
     }
@@ -444,7 +468,11 @@ func resourceApplicationUpdate(ctx context.Context, d *schema.ResourceData, meta
       input.ReleaseLabel = aws.String(v.(string))
     }
 
-    _, err := conn.UpdateApplication(ctx, input)
+    if v, ok := d.GetOk("runtime_configuration"); ok && len(v.([]any)) > 0 {
+      input.RuntimeConfiguration = expandRuntimeConfiguration(v.([]any))
+    }
+
+    _, err := conn.UpdateApplication(ctx, &input)
 
     if err != nil {
       return sdkdiag.AppendErrorf(diags, "updating EMR Serveless Application (%s): %s", d.Id(), err)
```
```diff
@@ -479,11 +507,11 @@ func resourceApplicationDelete(ctx context.Context, d *schema.ResourceData, meta
 }
 
 func findApplicationByID(ctx context.Context, conn *emrserverless.Client, id string) (*types.Application, error) {
-  input := &emrserverless.GetApplicationInput{
+  input := emrserverless.GetApplicationInput{
     ApplicationId: aws.String(id),
   }
 
-  output, err := conn.GetApplication(ctx, input)
+  output, err := conn.GetApplication(ctx, &input)
 
   if errs.IsA[*types.ResourceNotFoundException](err) {
     return nil, &retry.NotFoundError{
```
```diff
@@ -942,3 +970,56 @@ func flattenSchedulerConfiguration(apiObject *types.SchedulerConfiguration) []an
   }
   return []any{tfMap}
 }
+
+func expandRuntimeConfiguration(tfList []any) []types.Configuration {
+  if len(tfList) == 0 {
+    return nil
+  }
+
+  var apiObjects []types.Configuration
+
+  for _, tfMapRaw := range tfList {
+    tfMap, ok := tfMapRaw.(map[string]any)
+    if !ok {
+      continue
+    }
+
+    apiObject := types.Configuration{}
+
+    if v, ok := tfMap["classification"].(string); ok && v != "" {
+      apiObject.Classification = aws.String(v)
+    }
+
+    if v, ok := tfMap[names.AttrProperties].(map[string]any); ok && len(v) > 0 {
+      apiObject.Properties = flex.ExpandStringValueMap(v)
+    }
+
+    apiObjects = append(apiObjects, apiObject)
+  }
+
+  return apiObjects
+}
+
+func flattenRuntimeConfiguration(apiObjects []types.Configuration) []any {
+  if len(apiObjects) == 0 {
+    return nil
+  }
+
+  var tfList []any
+
+  for _, apiObject := range apiObjects {
+    tfMap := map[string]any{}
+
+    if v := apiObject.Classification; v != nil {
+      tfMap["classification"] = aws.ToString(v)
+    }
+
+    if v := apiObject.Properties; v != nil {
+      tfMap[names.AttrProperties] = flex.FlattenStringValueMap(v)
+    }
+
+    tfList = append(tfList, tfMap)
+  }
+
+  return tfList
+}
```
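
To illustrate the data mapping the new expander performs, here is a self-contained sketch that mirrors the commit's `expandRuntimeConfiguration` but inlines the provider-internal `flex.ExpandStringValueMap` helper and the `names.AttrProperties` constant (assumed to be a plain `map[string]any` to `map[string]string` conversion and the string `"properties"`, respectively):

```go
package main

import (
  "fmt"

  "github.com/aws/aws-sdk-go-v2/aws"
  "github.com/aws/aws-sdk-go-v2/service/emrserverless/types"
)

// expandRuntimeConfig mirrors the commit's expandRuntimeConfiguration with the
// provider-internal helpers inlined so the sketch stands alone.
func expandRuntimeConfig(tfList []any) []types.Configuration {
  var apiObjects []types.Configuration

  for _, raw := range tfList {
    tfMap, ok := raw.(map[string]any)
    if !ok {
      continue
    }

    apiObject := types.Configuration{}

    if v, ok := tfMap["classification"].(string); ok && v != "" {
      apiObject.Classification = aws.String(v)
    }

    if v, ok := tfMap["properties"].(map[string]any); ok && len(v) > 0 {
      apiObject.Properties = make(map[string]string, len(v))
      for k, val := range v {
        apiObject.Properties[k], _ = val.(string)
      }
    }

    apiObjects = append(apiObjects, apiObject)
  }

  return apiObjects
}

func main() {
  // One runtime_configuration block as it arrives from Terraform state ...
  tfList := []any{
    map[string]any{
      "classification": "spark-defaults",
      "properties":     map[string]any{"spark.executor.cores": "1"},
    },
  }

  // ... expanded into the EMR Serverless API shape.
  for _, c := range expandRuntimeConfig(tfList) {
    fmt.Println(aws.ToString(c.Classification), c.Properties)
  }
}
```

The flattener added in the same hunk is simply the inverse mapping, turning the `[]types.Configuration` returned by `GetApplication` back into the list-of-maps form stored in state.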

internal/service/emrserverless/application_test.go

Lines changed: 118 additions & 0 deletions
```diff
@@ -424,6 +424,61 @@ func TestAccEMRServerlessApplication_tags(t *testing.T) {
   })
 }
 
+func TestAccEMRServerlessApplication_runtimeConfiguration(t *testing.T) {
+  ctx := acctest.Context(t)
+  var application types.Application
+  resourceName := "aws_emrserverless_application.test"
+  rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix)
+
+  resource.ParallelTest(t, resource.TestCase{
+    PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+    ErrorCheck:               acctest.ErrorCheck(t, names.EMRServerlessServiceID),
+    ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+    CheckDestroy:             testAccCheckApplicationDestroy(ctx, t),
+    Steps: []resource.TestStep{
+      {
+        Config: testAccApplicationConfig_applicationConfiguration(rName),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckApplicationExists(ctx, t, resourceName, &application),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.#", "3"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.0.classification", "spark-defaults"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.0.properties.%", "4"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.0.properties.spark.driver.cores", "2"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.0.properties.spark.executor.cores", "1"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.0.properties.spark.driver.memory", "4G"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.0.properties.spark.executor.memory", "4G"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.1.classification", "spark-executor-log4j2"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.1.properties.%", "3"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.1.properties.rootLogger.level", "error"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.1.properties.logger.IdentifierForClass.name", "classpathForSettingLogger"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.1.properties.logger.IdentifierForClass.level", "info"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.2.classification", "spark-driver-log4j2"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.2.properties.%", "0"),
+        ),
+      },
+      {
+        ResourceName:      resourceName,
+        ImportState:       true,
+        ImportStateVerify: true,
+      },
+      {
+        Config: testAccApplicationConfig_applicationConfigurationUpdated(rName),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckApplicationExists(ctx, t, resourceName, &application),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.#", "2"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.0.classification", "spark-defaults"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.0.properties.%", "2"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.0.properties.spark.driver.cores", "4"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.0.properties.spark.driver.memory", "8G"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.1.classification", "hive-site"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.1.properties.%", "1"),
+          resource.TestCheckResourceAttr(resourceName, "runtime_configuration.1.properties.hive.metastore.client.factory.class", "com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory"),
+        ),
+      },
+    },
+  })
+}
+
 func testAccCheckApplicationExists(ctx context.Context, t *testing.T, resourceName string, application *types.Application) resource.TestCheckFunc {
   return func(s *terraform.State) error {
     rs, ok := s.RootModule().Resources[resourceName]
@@ -956,3 +1011,66 @@ resource "aws_emrserverless_application" "test" {
 }
 `, rName, queueTimeoutMinutes)
 }
+
+func testAccApplicationConfig_applicationConfiguration(rName string) string {
+  return fmt.Sprintf(`
+resource "aws_emrserverless_application" "test" {
+  name          = %[1]q
+  release_label = "emr-6.8.0"
+  type          = "spark"
+
+  runtime_configuration {
+    classification = "spark-defaults"
+
+    properties = {
+      "spark.driver.cores"    = "2"
+      "spark.executor.cores"  = "1"
+      "spark.driver.memory"   = "4G"
+      "spark.executor.memory" = "4G"
+    }
+  }
+
+  runtime_configuration {
+    classification = "spark-executor-log4j2"
+
+    properties = {
+      "rootLogger.level"                = "error"
+      "logger.IdentifierForClass.name"  = "classpathForSettingLogger"
+      "logger.IdentifierForClass.level" = "info"
+    }
+  }
+
+  runtime_configuration {
+    classification = "spark-driver-log4j2"
+    properties     = {}
+  }
+}
+`, rName)
+}
+
+func testAccApplicationConfig_applicationConfigurationUpdated(rName string) string {
+  return fmt.Sprintf(`
+resource "aws_emrserverless_application" "test" {
+  name          = %[1]q
+  release_label = "emr-6.8.0"
+  type          = "spark"
+
+  runtime_configuration {
+    classification = "spark-defaults"
+
+    properties = {
+      "spark.driver.cores"  = "4"
+      "spark.driver.memory" = "8G"
+    }
+  }
+
+  runtime_configuration {
+    classification = "hive-site"
+
+    properties = {
+      "hive.metastore.client.factory.class" = "com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory"
+    }
+  }
+}
+`, rName)
+}
```

website/docs/r/emrserverless_application.html.markdown

Lines changed: 31 additions & 0 deletions
````diff
@@ -59,11 +59,37 @@ resource "aws_emrserverless_application" "example" {
 }
 ```
 
+### Runtime Configuration Usage
+
+```terraform
+resource "aws_emrserverless_application" "example" {
+  name          = "example"
+  release_label = "emr-6.8.0"
+  type          = "spark"
+  runtime_configuration {
+    classification = "spark-executor-log4j2"
+    properties = {
+      "rootLogger.level"                = "error"
+      "logger.IdentifierForClass.name"  = "classpathForSettingLogger"
+      "logger.IdentifierForClass.level" = "info"
+    }
+  }
+  runtime_configuration {
+    classification = "spark-defaults"
+    properties = {
+      "spark.executor.memory" = "1g"
+      "spark.executor.cores"  = "1"
+    }
+  }
+}
+```
+
 ## Argument Reference
 
 This resource supports the following arguments:
 
 * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference).
+* `runtime_configuration` - (Optional) A configuration specification to be used when provisioning an application. A configuration consists of a classification, properties, and optional nested configurations. A classification refers to an application-specific configuration file. Properties are the settings you want to change in that file.
 * `architecture` - (Optional) The CPU architecture of an application. Valid values are `ARM64` or `X86_64`. Default value is `X86_64`.
 * `auto_start_configuration` - (Optional) The configuration for an application to automatically start on job submission.
 * `auto_stop_configuration` - (Optional) The configuration for an application to automatically stop after a certain amount of time being idle.
@@ -87,6 +113,11 @@ This resource supports the following arguments:
 * `enabled` - (Optional) Enables the application to automatically stop after a certain amount of time being idle. Defaults to `true`.
 * `idle_timeout_minutes` - (Optional) The amount of idle time in minutes after which your application will automatically stop. Defaults to `15` minutes.
 
+### runtime_configuration Arguments
+
+* `classification` - (Required) The classification within a configuration.
+* `properties` - (Optional) A set of properties specified within a configuration classification.
+
 ### initial_capacity Arguments
 
 * `initial_capacity_config` - (Optional) The initial capacity configuration per worker.
````