From 0a2f92b4f600ff404352ae8b4d8eb623b70c5c21 Mon Sep 17 00:00:00 2001
From: Wesley Nabo
Date: Tue, 5 Aug 2025 14:23:33 -0500
Subject: [PATCH 1/3] feat: create stream_workspace stub by duplicating
 stream_instance

---
 .../data_source_stream_workspace.go           |  54 ++++
 .../data_source_stream_workspace_test.go      |  43 +++
 .../data_source_stream_workspaces.go          |  72 +++++
 .../data_source_stream_workspaces_test.go     |  97 ++++++
 internal/service/streamworkspace/main_test.go |  15 +
 .../streamworkspace/model_stream_workspace.go | 103 ++++++
 .../model_stream_workspace_test.go            | 296 ++++++++++++++++++
 .../streamworkspace/resource_schema.go        |  85 +++++
 .../resource_stream_workspace.go              | 162 ++++++++++
 ...esource_stream_workspace_migration_test.go |  33 ++
 .../resource_stream_workspace_test.go         | 104 ++++++
 11 files changed, 1064 insertions(+)
 create mode 100644 internal/service/streamworkspace/data_source_stream_workspace.go
 create mode 100644 internal/service/streamworkspace/data_source_stream_workspace_test.go
 create mode 100644 internal/service/streamworkspace/data_source_stream_workspaces.go
 create mode 100644 internal/service/streamworkspace/data_source_stream_workspaces_test.go
 create mode 100644 internal/service/streamworkspace/main_test.go
 create mode 100644 internal/service/streamworkspace/model_stream_workspace.go
 create mode 100644 internal/service/streamworkspace/model_stream_workspace_test.go
 create mode 100644 internal/service/streamworkspace/resource_schema.go
 create mode 100644 internal/service/streamworkspace/resource_stream_workspace.go
 create mode 100644 internal/service/streamworkspace/resource_stream_workspace_migration_test.go
 create mode 100644 internal/service/streamworkspace/resource_stream_workspace_test.go

diff --git a/internal/service/streamworkspace/data_source_stream_workspace.go b/internal/service/streamworkspace/data_source_stream_workspace.go
new file mode 100644
index 0000000000..3e4ece7698
--- /dev/null
+++ b/internal/service/streamworkspace/data_source_stream_workspace.go
@@ -0,0 +1,54 @@
+package streamworkspace
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
+)
+
+var _ datasource.DataSource = &streamWorkspaceDS{}
+var _ datasource.DataSourceWithConfigure = &streamWorkspaceDS{}
+
+func DataSource() datasource.DataSource {
+	return &streamWorkspaceDS{
+		DSCommon: config.DSCommon{
+			DataSourceName: streamWorkspaceName,
+		},
+	}
+}
+
+type streamWorkspaceDS struct {
+	config.DSCommon
+}
+
+func (d *streamWorkspaceDS) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+	resp.Schema = conversion.DataSourceSchemaFromResource(ResourceSchema(ctx), &conversion.DataSourceSchemaRequest{
+		RequiredFields: []string{"project_id", "workspace_name"},
+	})
+}
+
+func (d *streamWorkspaceDS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+	var streamWorkspaceConfig TFStreamWorkspaceModel
+	resp.Diagnostics.Append(req.Config.Get(ctx, &streamWorkspaceConfig)...)
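+	// Stop early if decoding the data source config into the model raised diagnostics.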
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	connV2 := d.Client.AtlasV2
+	projectID := streamWorkspaceConfig.ProjectID.ValueString()
+	workspaceName := streamWorkspaceConfig.WorkspaceName.ValueString()
+	apiResp, _, err := connV2.StreamsApi.GetStreamInstance(ctx, projectID, workspaceName).Execute()
+	if err != nil {
+		resp.Diagnostics.AddError("error fetching resource", err.Error())
+		return
+	}
+
+	newStreamWorkspaceModel, diags := NewTFStreamWorkspace(ctx, apiResp)
+	if diags.HasError() {
+		resp.Diagnostics.Append(diags...)
+		return
+	}
+	resp.Diagnostics.Append(resp.State.Set(ctx, newStreamWorkspaceModel)...)
+}
diff --git a/internal/service/streamworkspace/data_source_stream_workspace_test.go b/internal/service/streamworkspace/data_source_stream_workspace_test.go
new file mode 100644
index 0000000000..d0429658a3
--- /dev/null
+++ b/internal/service/streamworkspace/data_source_stream_workspace_test.go
@@ -0,0 +1,43 @@
+package streamworkspace_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
+)
+
+func TestAccStreamDSStreamWorkspace_basic(t *testing.T) {
+	var (
+		dataSourceName = "data.mongodbatlas_stream_workspace.test"
+		projectID      = acc.ProjectIDExecution(t)
+		workspaceName  = acc.RandomName()
+	)
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acc.PreCheckBasic(t) },
+		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
+		CheckDestroy:             acc.CheckDestroyStreamInstance,
+		Steps: []resource.TestStep{
+			{
+				Config: streamWorkspaceDataSourceConfig(projectID, workspaceName, region, cloudProvider),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					streamWorkspaceAttributeChecks(dataSourceName, workspaceName, region, cloudProvider),
+					resource.TestCheckResourceAttr(dataSourceName, "stream_config.tier", "SP30"),
+				),
+			},
+		},
+	})
+}
+
+func streamWorkspaceDataSourceConfig(projectID, workspaceName, region, cloudProvider string) string {
+	return fmt.Sprintf(`
+	%s
+
+	data "mongodbatlas_stream_workspace" "test" {
+		project_id     = mongodbatlas_stream_workspace.test.project_id
+		workspace_name = mongodbatlas_stream_workspace.test.workspace_name
+	}
+	`, acc.StreamInstanceConfig(projectID, workspaceName, region, cloudProvider))
+}
diff --git a/internal/service/streamworkspace/data_source_stream_workspaces.go b/internal/service/streamworkspace/data_source_stream_workspaces.go
new file mode 100644
index 0000000000..9332a433af
--- /dev/null
+++ b/internal/service/streamworkspace/data_source_stream_workspaces.go
@@ -0,0 +1,72 @@
+package streamworkspace
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
+	"go.mongodb.org/atlas-sdk/v20250312005/admin"
+)
+
+var _ datasource.DataSource = &streamWorkspacesDS{}
+var _ datasource.DataSourceWithConfigure = &streamWorkspacesDS{}
+
+func PluralDataSource() datasource.DataSource {
+	return &streamWorkspacesDS{
+		DSCommon: config.DSCommon{
+			DataSourceName: fmt.Sprintf("%ss", streamWorkspaceName),
+		},
+	}
+}
+
+type streamWorkspacesDS struct {
+	config.DSCommon
+}
+
+func (d *streamWorkspacesDS) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) {
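+	// The plural data source schema is derived from the resource schema so the two stay in sync.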
+	resp.Schema = conversion.PluralDataSourceSchemaFromResource(ResourceSchema(ctx), &conversion.PluralDataSourceSchemaRequest{
+		RequiredFields:  []string{"project_id"},
+		HasLegacyFields: true,
+	})
+}
+
+func (d *streamWorkspacesDS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+	var streamWorkspacesConfig TFStreamWorkspacesModel
+	resp.Diagnostics.Append(req.Config.Get(ctx, &streamWorkspacesConfig)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	connV2 := d.Client.AtlasV2
+	projectID := streamWorkspacesConfig.ProjectID.ValueString()
+	itemsPerPage := streamWorkspacesConfig.ItemsPerPage.ValueInt64Pointer()
+	pageNum := streamWorkspacesConfig.PageNum.ValueInt64Pointer()
+	apiResp, _, err := connV2.StreamsApi.ListStreamInstancesWithParams(ctx, &admin.ListStreamInstancesApiParams{
+		GroupId:      projectID,
+		ItemsPerPage: conversion.Int64PtrToIntPtr(itemsPerPage),
+		PageNum:      conversion.Int64PtrToIntPtr(pageNum),
+	}).Execute()
+	if err != nil {
+		resp.Diagnostics.AddError("error fetching results", err.Error())
+		return
+	}
+
+	newStreamWorkspacesModel, diags := NewTFStreamWorkspaces(ctx, &streamWorkspacesConfig, apiResp)
+	if diags.HasError() {
+		resp.Diagnostics.Append(diags...)
+		return
+	}
+	resp.Diagnostics.Append(resp.State.Set(ctx, newStreamWorkspacesModel)...)
+}
+
+type TFStreamWorkspacesModel struct {
+	ID           types.String             `tfsdk:"id"`
+	ProjectID    types.String             `tfsdk:"project_id"`
+	Results      []TFStreamWorkspaceModel `tfsdk:"results"`
+	PageNum      types.Int64              `tfsdk:"page_num"`
+	ItemsPerPage types.Int64              `tfsdk:"items_per_page"`
+	TotalCount   types.Int64              `tfsdk:"total_count"`
+}
diff --git a/internal/service/streamworkspace/data_source_stream_workspaces_test.go b/internal/service/streamworkspace/data_source_stream_workspaces_test.go
new file mode 100644
index 0000000000..5db68d54bb
--- /dev/null
+++ b/internal/service/streamworkspace/data_source_stream_workspaces_test.go
@@ -0,0 +1,97 @@
+package streamworkspace_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
+	"go.mongodb.org/atlas-sdk/v20250312005/admin"
+)
+
+func TestAccStreamDSStreamworkspaces_basic(t *testing.T) {
+	var (
+		dataSourceName = "data.mongodbatlas_stream_workspaces.test"
+		projectID      = acc.ProjectIDExecution(t)
+		workspaceName  = acc.RandomName()
+	)
+
+	checks := paginatedAttrChecks(dataSourceName, nil, nil)
+	// created workspace is present in results
+	checks = append(checks, resource.TestCheckResourceAttrWith(dataSourceName, "results.#", acc.IntGreatThan(0)),
+		resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "results.*", map[string]string{
+			"workspace_name": workspaceName,
+		}))
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acc.PreCheckBasic(t) },
+		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
+		CheckDestroy:             acc.CheckDestroyStreamInstance,
+		Steps: []resource.TestStep{
+			{
+				Config: streamworkspacesDataSourceConfig(projectID, workspaceName, region, cloudProvider),
+				Check:  resource.ComposeAggregateTestCheckFunc(checks...),
+			},
+		},
+	})
+}
+
+func TestAccStreamDSStreamworkspaces_withPageConfig(t *testing.T) {
+	var (
+		dataSourceName = "data.mongodbatlas_stream_workspaces.test"
+		projectID      = acc.ProjectIDExecution(t)
+		workspaceName  = acc.RandomName()
+		pageNumber     = 1000 // high page number so no results are returned
+	)
+
+	checks := paginatedAttrChecks(dataSourceName, admin.PtrInt(pageNumber), admin.PtrInt(1))
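+	// items_per_page is pinned to 1 so the high page number is guaranteed to land past the single created workspace.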
+	checks = append(checks, resource.TestCheckResourceAttr(dataSourceName, "results.#", "0")) // expecting no results
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acc.PreCheckBasic(t) },
+		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
+		CheckDestroy:             acc.CheckDestroyStreamInstance,
+		Steps: []resource.TestStep{
+			{
+				Config: streamworkspacesWithPageAttrDataSourceConfig(projectID, workspaceName, region, cloudProvider, pageNumber),
+				Check:  resource.ComposeAggregateTestCheckFunc(checks...),
+			},
+		},
+	})
+}
+
+func streamworkspacesDataSourceConfig(projectID, workspaceName, region, cloudProvider string) string {
+	return fmt.Sprintf(`
+	%s
+
+	data "mongodbatlas_stream_workspaces" "test" {
+		project_id = mongodbatlas_stream_workspace.test.project_id
+	}
+	`, acc.StreamInstanceConfig(projectID, workspaceName, region, cloudProvider))
+}
+
+func streamworkspacesWithPageAttrDataSourceConfig(projectID, workspaceName, region, cloudProvider string, pageNum int) string {
+	return fmt.Sprintf(`
+	%s
+
+	data "mongodbatlas_stream_workspaces" "test" {
+		project_id     = mongodbatlas_stream_workspace.test.project_id
+		page_num       = %d
+		items_per_page = 1
+	}
+	`, acc.StreamInstanceConfig(projectID, workspaceName, region, cloudProvider), pageNum)
+}
+
+func paginatedAttrChecks(resourceName string, pageNum, itemsPerPage *int) []resource.TestCheckFunc {
+	checks := []resource.TestCheckFunc{
+		resource.TestCheckResourceAttrSet(resourceName, "project_id"),
+		resource.TestCheckResourceAttrSet(resourceName, "total_count"),
+	}
+	if pageNum != nil {
+		checks = append(checks, resource.TestCheckResourceAttr(resourceName, "page_num", fmt.Sprint(*pageNum)))
+	}
+	if itemsPerPage != nil {
+		checks = append(checks, resource.TestCheckResourceAttr(resourceName, "items_per_page", fmt.Sprint(*itemsPerPage)))
+	}
+	return checks
+}
diff --git a/internal/service/streamworkspace/main_test.go b/internal/service/streamworkspace/main_test.go
new file mode 100644
index 0000000000..b497136d03
--- /dev/null
+++ b/internal/service/streamworkspace/main_test.go
@@ -0,0 +1,15 @@
+package streamworkspace_test
+
+import (
+	"os"
+	"testing"
+
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
+)
+
+func TestMain(m *testing.M) {
+	cleanup := acc.SetupSharedResources()
+	exitCode := m.Run()
+	cleanup()
+	os.Exit(exitCode)
+}
diff --git a/internal/service/streamworkspace/model_stream_workspace.go b/internal/service/streamworkspace/model_stream_workspace.go
new file mode 100644
index 0000000000..eaaf7451bb
--- /dev/null
+++ b/internal/service/streamworkspace/model_stream_workspace.go
@@ -0,0 +1,103 @@
+package streamworkspace
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/id"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
+	"go.mongodb.org/atlas-sdk/v20250312005/admin"
+)
+
+func NewStreamWorkspaceCreateReq(ctx context.Context, plan *TFStreamWorkspaceModel) (*admin.StreamsTenant, diag.Diagnostics) {
+	dataProcessRegion := &TFWorkspaceProcessRegionSpecModel{}
+	if diags := plan.DataProcessRegion.As(ctx, dataProcessRegion, basetypes.ObjectAsOptions{}); diags.HasError() {
+		return nil, diags
+	}
+	streamTenant := &admin.StreamsTenant{
+		GroupId: plan.ProjectID.ValueStringPointer(),
+		Name:    plan.WorkspaceName.ValueStringPointer(),
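+		// data_process_region is a required attribute, so its converted value is always sent on create.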
+		DataProcessRegion: &admin.StreamsDataProcessRegion{
+			CloudProvider: dataProcessRegion.CloudProvider.ValueString(),
+			Region:        dataProcessRegion.Region.ValueString(),
+		},
+	}
+	if !plan.StreamConfig.IsNull() && !plan.StreamConfig.IsUnknown() {
+		streamConfig := new(TFWorkspaceStreamConfigSpecModel)
+		if diags := plan.StreamConfig.As(ctx, streamConfig, basetypes.ObjectAsOptions{}); diags.HasError() {
+			return nil, diags
+		}
+		streamTenant.StreamConfig = &admin.StreamConfig{
+			Tier: streamConfig.Tier.ValueStringPointer(),
+		}
+	}
+	return streamTenant, nil
+}
+
+func NewStreamWorkspaceUpdateReq(ctx context.Context, plan *TFStreamWorkspaceModel) (*admin.StreamsDataProcessRegion, diag.Diagnostics) {
+	dataProcessRegion := &TFWorkspaceProcessRegionSpecModel{}
+	if diags := plan.DataProcessRegion.As(ctx, dataProcessRegion, basetypes.ObjectAsOptions{}); diags.HasError() {
+		return nil, diags
+	}
+	return &admin.StreamsDataProcessRegion{
+		CloudProvider: dataProcessRegion.CloudProvider.ValueString(),
+		Region:        dataProcessRegion.Region.ValueString(),
+	}, nil
+}
+
+func NewTFStreamWorkspace(ctx context.Context, apiResp *admin.StreamsTenant) (*TFStreamWorkspaceModel, diag.Diagnostics) {
+	hostnames, diags := types.ListValueFrom(ctx, types.StringType, apiResp.Hostnames)
+
+	var dataProcessRegion = types.ObjectNull(ProcessRegionObjectType.AttrTypes)
+	if apiResp.DataProcessRegion != nil {
+		returnedProcessRegion, diagsProcessRegion := types.ObjectValueFrom(ctx, ProcessRegionObjectType.AttrTypes, TFWorkspaceProcessRegionSpecModel{
+			CloudProvider: types.StringValue(apiResp.DataProcessRegion.CloudProvider),
+			Region:        types.StringValue(apiResp.DataProcessRegion.Region),
+		})
+		dataProcessRegion = returnedProcessRegion
+		diags.Append(diagsProcessRegion...)
+	}
+	var streamConfig = types.ObjectNull(StreamConfigObjectType.AttrTypes)
+	apiStreamConfig := apiResp.StreamConfig
+	if apiStreamConfig != nil && apiStreamConfig.Tier != nil {
+		returnedStreamConfig, diagsStreamConfig := types.ObjectValueFrom(ctx, StreamConfigObjectType.AttrTypes, TFWorkspaceStreamConfigSpecModel{
+			Tier: types.StringPointerValue(apiStreamConfig.Tier),
+		})
+		streamConfig = returnedStreamConfig
+		diags.Append(diagsStreamConfig...)
+	}
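+	// Diagnostics from the hostnames, data process region, and stream config conversions are checked together here.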
+	if diags.HasError() {
+		return nil, diags
+	}
+
+	return &TFStreamWorkspaceModel{
+		ID:                types.StringPointerValue(apiResp.Id),
+		WorkspaceName:     types.StringPointerValue(apiResp.Name),
+		ProjectID:         types.StringPointerValue(apiResp.GroupId),
+		DataProcessRegion: dataProcessRegion,
+		StreamConfig:      streamConfig,
+		Hostnames:         hostnames,
+	}, nil
+}
+
+func NewTFStreamWorkspaces(ctx context.Context, streamWorkspacesConfig *TFStreamWorkspacesModel, paginatedResult *admin.PaginatedApiStreamsTenant) (*TFStreamWorkspacesModel, diag.Diagnostics) {
+	input := paginatedResult.GetResults()
+	results := make([]TFStreamWorkspaceModel, len(input))
+	for i := range input {
+		workspace, diags := NewTFStreamWorkspace(ctx, &input[i])
+		if diags.HasError() {
+			return nil, diags
+		}
+		results[i] = *workspace
+	}
+	return &TFStreamWorkspacesModel{
+		ID:           types.StringValue(id.UniqueId()),
+		ProjectID:    streamWorkspacesConfig.ProjectID,
+		PageNum:      streamWorkspacesConfig.PageNum,
+		ItemsPerPage: streamWorkspacesConfig.ItemsPerPage,
+		TotalCount:   types.Int64PointerValue(conversion.IntPtrToInt64Ptr(paginatedResult.TotalCount)),
+		Results:      results,
+	}, nil
+}
diff --git a/internal/service/streamworkspace/model_stream_workspace_test.go b/internal/service/streamworkspace/model_stream_workspace_test.go
new file mode 100644
index 0000000000..c6b5200be7
--- /dev/null
+++ b/internal/service/streamworkspace/model_stream_workspace_test.go
@@ -0,0 +1,296 @@
+package streamworkspace_test
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	streamworkspace "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streamworkspace"
+	"go.mongodb.org/atlas-sdk/v20250312005/admin"
+)
+
+const (
+	dummyProjectID         = "111111111111111111111111"
+	dummyStreamWorkspaceID = "222222222222222222222222"
+	cloudProvider          = "AWS"
+	region                 = "VIRGINIA_USA"
+	workspaceName          = "WorkspaceName"
+	tier                   = "SP30"
+)
+
+var hostnames = &[]string{"atlas-stream.virginia-usa.a.query.mongodb-dev.net"}
+
+type sdkToTFModelTestCase struct {
+	SDKResp         *admin.StreamsTenant
+	expectedTFModel *streamworkspace.TFStreamWorkspaceModel
+	name            string
+}
+
+func TestStreamWorkspaceSDKToTFModel(t *testing.T) {
+	testCases := []sdkToTFModelTestCase{
+		{
+			name: "Complete SDK response",
+			SDKResp: &admin.StreamsTenant{
+				Id: admin.PtrString(dummyStreamWorkspaceID),
+				DataProcessRegion: &admin.StreamsDataProcessRegion{
+					CloudProvider: cloudProvider,
+					Region:        region,
+				},
+				StreamConfig: &admin.StreamConfig{
+					Tier: admin.PtrString(tier),
+				},
+				GroupId:   admin.PtrString(dummyProjectID),
+				Hostnames: hostnames,
+				Name:      admin.PtrString(workspaceName),
+			},
+			expectedTFModel: &streamworkspace.TFStreamWorkspaceModel{
+				ID:                types.StringValue(dummyStreamWorkspaceID),
+				DataProcessRegion: tfRegionObject(t, cloudProvider, region),
+				ProjectID:         types.StringValue(dummyProjectID),
+				Hostnames:         tfHostnamesList(t, hostnames),
+				WorkspaceName:     types.StringValue(workspaceName),
+				StreamConfig:      tfStreamConfigObject(t, tier),
+			},
+		},
+		{
+			name: "Empty hostnames, streamConfig and dataProcessRegion in response", // should never happen, but verifying it is handled gracefully
+			SDKResp: &admin.StreamsTenant{
+				Id:      admin.PtrString(dummyStreamWorkspaceID),
+				GroupId: admin.PtrString(dummyProjectID),
+				Name:    admin.PtrString(workspaceName),
+			},
+			expectedTFModel: &streamworkspace.TFStreamWorkspaceModel{
+				ID: types.StringValue(dummyStreamWorkspaceID),
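+				// Absent nested objects are expected to surface as typed nulls, not empty objects.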
+				DataProcessRegion: types.ObjectNull(streamworkspace.ProcessRegionObjectType.AttrTypes),
+				ProjectID:         types.StringValue(dummyProjectID),
+				Hostnames:         types.ListNull(types.StringType),
+				WorkspaceName:     types.StringValue(workspaceName),
+				StreamConfig:      types.ObjectNull(streamworkspace.StreamConfigObjectType.AttrTypes),
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			resultModel, diags := streamworkspace.NewTFStreamWorkspace(t.Context(), tc.SDKResp)
+			if diags.HasError() {
+				t.Errorf("unexpected errors found: %s", diags.Errors()[0].Summary())
+			}
+			if !reflect.DeepEqual(resultModel, tc.expectedTFModel) {
+				t.Errorf("created terraform model did not match expected output")
+			}
+		})
+	}
+}
+
+type paginatedWorkspacesSDKToTFModelTestCase struct {
+	SDKResp         *admin.PaginatedApiStreamsTenant
+	providedConfig  *streamworkspace.TFStreamWorkspacesModel
+	expectedTFModel *streamworkspace.TFStreamWorkspacesModel
+	name            string
+}
+
+func TestStreamWorkspacesSDKToTFModel(t *testing.T) {
+	testCases := []paginatedWorkspacesSDKToTFModelTestCase{
+		{
+			name: "Complete SDK response with configured page options",
+			SDKResp: &admin.PaginatedApiStreamsTenant{
+				Results: &[]admin.StreamsTenant{
+					{
+						Id: admin.PtrString(dummyStreamWorkspaceID),
+						DataProcessRegion: &admin.StreamsDataProcessRegion{
+							CloudProvider: cloudProvider,
+							Region:        region,
+						},
+						GroupId:   admin.PtrString(dummyProjectID),
+						Hostnames: hostnames,
+						Name:      admin.PtrString(workspaceName),
+						StreamConfig: &admin.StreamConfig{
+							Tier: admin.PtrString(tier),
+						},
+					},
+				},
+				TotalCount: admin.PtrInt(1),
+			},
+			providedConfig: &streamworkspace.TFStreamWorkspacesModel{
+				ProjectID:    types.StringValue(dummyProjectID),
+				PageNum:      types.Int64Value(1),
+				ItemsPerPage: types.Int64Value(2),
+			},
+			expectedTFModel: &streamworkspace.TFStreamWorkspacesModel{
+				ProjectID:    types.StringValue(dummyProjectID),
+				PageNum:      types.Int64Value(1),
+				ItemsPerPage: types.Int64Value(2),
+				TotalCount:   types.Int64Value(1),
+				Results: []streamworkspace.TFStreamWorkspaceModel{
+					{
+						ID:                types.StringValue(dummyStreamWorkspaceID),
+						DataProcessRegion: tfRegionObject(t, cloudProvider, region),
+						ProjectID:         types.StringValue(dummyProjectID),
+						Hostnames:         tfHostnamesList(t, hostnames),
+						WorkspaceName:     types.StringValue(workspaceName),
+						StreamConfig:      tfStreamConfigObject(t, tier),
+					},
+				},
+			},
+		},
+		{
+			name: "Without defining page options",
+			SDKResp: &admin.PaginatedApiStreamsTenant{
+				Results:    &[]admin.StreamsTenant{},
+				TotalCount: admin.PtrInt(0),
+			},
+			providedConfig: &streamworkspace.TFStreamWorkspacesModel{
+				ProjectID: types.StringValue(dummyProjectID),
+			},
+			expectedTFModel: &streamworkspace.TFStreamWorkspacesModel{
+				ProjectID:    types.StringValue(dummyProjectID),
+				PageNum:      types.Int64Null(),
+				ItemsPerPage: types.Int64Null(),
+				TotalCount:   types.Int64Value(0),
+				Results:      []streamworkspace.TFStreamWorkspaceModel{},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			resultModel, diags := streamworkspace.NewTFStreamWorkspaces(t.Context(), tc.providedConfig, tc.SDKResp)
+			tc.expectedTFModel.ID = resultModel.ID // id is auto-generated, have no way of defining within expected model
+			if diags.HasError() {
+				t.Errorf("unexpected errors found: %s", diags.Errors()[0].Summary())
+			}
+			if !reflect.DeepEqual(resultModel, tc.expectedTFModel) {
+				t.Errorf("created terraform model did not match expected output")
+			}
+		})
+	}
+}
+
+type tfToSDKCreateModelTestCase struct {
+	tfModel *streamworkspace.TFStreamWorkspaceModel
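+	// expectedSDKReq is the admin API payload the TF plan model should convert into.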
+	expectedSDKReq *admin.StreamsTenant
+	name           string
+}
+
+func TestStreamWorkspaceTFToSDKCreateModel(t *testing.T) {
+	testCases := []tfToSDKCreateModelTestCase{
+		{
+			name: "Complete TF state",
+			tfModel: &streamworkspace.TFStreamWorkspaceModel{
+				DataProcessRegion: tfRegionObject(t, cloudProvider, region),
+				ProjectID:         types.StringValue(dummyProjectID),
+				WorkspaceName:     types.StringValue(workspaceName),
+				StreamConfig:      tfStreamConfigObject(t, tier),
+			},
+			expectedSDKReq: &admin.StreamsTenant{
+				DataProcessRegion: &admin.StreamsDataProcessRegion{
+					CloudProvider: cloudProvider,
+					Region:        region,
+				},
+				GroupId: admin.PtrString(dummyProjectID),
+				Name:    admin.PtrString(workspaceName),
+				StreamConfig: &admin.StreamConfig{
+					Tier: admin.PtrString(tier),
+				},
+			},
+		},
+		{
+			name: "TF State without StreamConfig",
+			tfModel: &streamworkspace.TFStreamWorkspaceModel{
+				DataProcessRegion: tfRegionObject(t, cloudProvider, region),
+				ProjectID:         types.StringValue(dummyProjectID),
+				WorkspaceName:     types.StringValue(workspaceName),
+			},
+			expectedSDKReq: &admin.StreamsTenant{
+				DataProcessRegion: &admin.StreamsDataProcessRegion{
+					CloudProvider: cloudProvider,
+					Region:        region,
+				},
+				GroupId: admin.PtrString(dummyProjectID),
+				Name:    admin.PtrString(workspaceName),
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			apiReqResult, diags := streamworkspace.NewStreamWorkspaceCreateReq(t.Context(), tc.tfModel)
+			if diags.HasError() {
+				t.Errorf("unexpected errors found: %s", diags.Errors()[0].Summary())
+			}
+			if !reflect.DeepEqual(apiReqResult, tc.expectedSDKReq) {
+				t.Errorf("created sdk model did not match expected output")
+			}
+		})
+	}
+}
+
+type tfToSDKUpdateModelTestCase struct {
+	tfModel        *streamworkspace.TFStreamWorkspaceModel
+	expectedSDKReq *admin.StreamsDataProcessRegion
+	name           string
+}
+
+func TestStreamWorkspaceTFToSDKUpdateModel(t *testing.T) {
+	testCases := []tfToSDKUpdateModelTestCase{
+		{
+			name: "Complete TF state",
+			tfModel: &streamworkspace.TFStreamWorkspaceModel{
+				ID:                types.StringValue(dummyStreamWorkspaceID),
+				DataProcessRegion: tfRegionObject(t, cloudProvider, region),
+				ProjectID:         types.StringValue(dummyProjectID),
+				Hostnames:         tfHostnamesList(t, hostnames),
+				WorkspaceName:     types.StringValue(workspaceName),
+			},
+			expectedSDKReq: &admin.StreamsDataProcessRegion{
+				CloudProvider: cloudProvider,
+				Region:        region,
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			apiReqResult, diags := streamworkspace.NewStreamWorkspaceUpdateReq(t.Context(), tc.tfModel)
+			if diags.HasError() {
+				t.Errorf("unexpected errors found: %s", diags.Errors()[0].Summary())
+			}
+			if !reflect.DeepEqual(apiReqResult, tc.expectedSDKReq) {
+				t.Errorf("created sdk model did not match expected output")
+			}
+		})
+	}
+}
+
+func tfRegionObject(t *testing.T, cloudProvider, region string) types.Object {
+	t.Helper()
+	dataProcessRegion, diags := types.ObjectValueFrom(t.Context(), streamworkspace.ProcessRegionObjectType.AttrTypes, streamworkspace.TFWorkspaceProcessRegionSpecModel{
+		CloudProvider: types.StringValue(cloudProvider),
+		Region:        types.StringValue(region),
+	})
+	if diags.HasError() {
+		t.Errorf("failed to create terraform data process region model: %s", diags.Errors()[0].Summary())
+	}
+	return dataProcessRegion
+}
+
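+// tfStreamConfigObject builds the stream_config object value used across the model tests.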
+func tfStreamConfigObject(t *testing.T, tier string) types.Object {
+	t.Helper()
+	streamConfig, diags := types.ObjectValueFrom(t.Context(), streamworkspace.StreamConfigObjectType.AttrTypes, streamworkspace.TFWorkspaceStreamConfigSpecModel{
+		Tier: types.StringValue(tier),
+	})
+	if diags.HasError() {
+		t.Errorf("failed to create terraform stream config model: %s", diags.Errors()[0].Summary())
+	}
+	return streamConfig
+}
+
+func tfHostnamesList(t *testing.T, hostnames *[]string) types.List {
+	t.Helper()
+	resultList, diags := types.ListValueFrom(t.Context(), types.StringType, hostnames)
+	if diags.HasError() {
+		t.Errorf("failed to create terraform hostnames list: %s", diags.Errors()[0].Summary())
+	}
+	return resultList
+}
diff --git a/internal/service/streamworkspace/resource_schema.go b/internal/service/streamworkspace/resource_schema.go
new file mode 100644
index 0000000000..f2038fb52e
--- /dev/null
+++ b/internal/service/streamworkspace/resource_schema.go
@@ -0,0 +1,85 @@
+package streamworkspace
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+func ResourceSchema(ctx context.Context) schema.Schema {
+	return schema.Schema{
+		Attributes: map[string]schema.Attribute{
+			"id": schema.StringAttribute{
+				Computed: true,
+			},
+			"workspace_name": schema.StringAttribute{
+				Required: true,
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.RequiresReplace(),
+				},
+			},
+			"project_id": schema.StringAttribute{
+				Required: true,
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.RequiresReplace(),
+				},
+			},
+			"data_process_region": schema.SingleNestedAttribute{
+				Required: true,
+				Attributes: map[string]schema.Attribute{
+					"cloud_provider": schema.StringAttribute{
+						Required: true,
+					},
+					"region": schema.StringAttribute{
+						Required: true,
+					},
+				},
+			},
+			"hostnames": schema.ListAttribute{
+				ElementType: types.StringType,
+				Computed:    true,
+			},
+			"stream_config": schema.SingleNestedAttribute{
+				Optional: true,
+				Computed: true,
+				Attributes: map[string]schema.Attribute{
+					"tier": schema.StringAttribute{
+						Optional: true,
+						Computed: true,
+					},
+				},
+			},
+		},
+	}
+}
+
+type TFStreamWorkspaceModel struct {
+	ID                types.String `tfsdk:"id"`
+	WorkspaceName     types.String `tfsdk:"workspace_name"`
+	ProjectID         types.String `tfsdk:"project_id"`
+	DataProcessRegion types.Object `tfsdk:"data_process_region"`
+	StreamConfig      types.Object `tfsdk:"stream_config"`
+	Hostnames         types.List   `tfsdk:"hostnames"`
+}
+
+type TFWorkspaceProcessRegionSpecModel struct {
+	CloudProvider types.String `tfsdk:"cloud_provider"`
+	Region        types.String `tfsdk:"region"`
+}
+
+type TFWorkspaceStreamConfigSpecModel struct {
+	Tier types.String `tfsdk:"tier"`
+}
+
+var ProcessRegionObjectType = types.ObjectType{AttrTypes: map[string]attr.Type{
+	"cloud_provider": types.StringType,
+	"region":         types.StringType,
+}}
+
+var StreamConfigObjectType = types.ObjectType{AttrTypes: map[string]attr.Type{
+	"tier": types.StringType,
+}}
diff --git a/internal/service/streamworkspace/resource_stream_workspace.go b/internal/service/streamworkspace/resource_stream_workspace.go
new file mode 100644
index 0000000000..6821c83450
--- /dev/null
+++ b/internal/service/streamworkspace/resource_stream_workspace.go
@@ -0,0 +1,162 @@
+package streamworkspace
+
+import (
+	"context"
+	"errors"
+	"regexp"
+
+	"github.com/hashicorp/terraform-plugin-framework/path"
+	"github.com/hashicorp/terraform-plugin-framework/resource"
+
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/validate"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
+)
+
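+// Compile-time assertions that streamWorkspaceRS implements the plugin-framework resource interfaces.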
"github.com/hashicorp/terraform-plugin-framework/resource" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/validate" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" +) + +var _ resource.ResourceWithConfigure = &streamWorkspaceRS{} +var _ resource.ResourceWithImportState = &streamWorkspaceRS{} + +const streamWorkspaceName = "stream_workspace" + +func Resource() resource.Resource { + return &streamWorkspaceRS{ + RSCommon: config.RSCommon{ + ResourceName: streamWorkspaceName, + }, + } +} + +type streamWorkspaceRS struct { + config.RSCommon +} + +func (r *streamWorkspaceRS) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = ResourceSchema(ctx) + conversion.UpdateSchemaDescription(&resp.Schema) +} + +func (r *streamWorkspaceRS) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var streamWorkspacePlan TFStreamWorkspaceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &streamWorkspacePlan)...) + if resp.Diagnostics.HasError() { + return + } + + connV2 := r.Client.AtlasV2 + projectID := streamWorkspacePlan.ProjectID.ValueString() + streamWorkspaceReq, diags := NewStreamWorkspaceCreateReq(ctx, &streamWorkspacePlan) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + apiResp, _, err := connV2.StreamsApi.CreateStreamInstance(ctx, projectID, streamWorkspaceReq).Execute() + if err != nil { + resp.Diagnostics.AddError("error creating resource", err.Error()) + return + } + + newStreamWorkspaceModel, diags := NewTFStreamWorkspace(ctx, apiResp) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, newStreamWorkspaceModel)...) +} + +func (r *streamWorkspaceRS) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var streamWorkspaceState TFStreamWorkspaceModel + resp.Diagnostics.Append(req.State.Get(ctx, &streamWorkspaceState)...) + if resp.Diagnostics.HasError() { + return + } + + connV2 := r.Client.AtlasV2 + projectID := streamWorkspaceState.ProjectID.ValueString() + workspaceName := streamWorkspaceState.WorkspaceName.ValueString() + apiResp, getResp, err := connV2.StreamsApi.GetStreamInstance(ctx, projectID, workspaceName).Execute() + if err != nil { + if validate.StatusNotFound(getResp) { + resp.State.RemoveResource(ctx) + return + } + resp.Diagnostics.AddError("error fetching resource", err.Error()) + return + } + + newStreamWorkspaceModel, diags := NewTFStreamWorkspace(ctx, apiResp) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, newStreamWorkspaceModel)...) +} + +func (r *streamWorkspaceRS) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var streamWorkspacePlan TFStreamWorkspaceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &streamWorkspacePlan)...) + if resp.Diagnostics.HasError() { + return + } + + connV2 := r.Client.AtlasV2 + projectID := streamWorkspacePlan.ProjectID.ValueString() + workspaceName := streamWorkspacePlan.WorkspaceName.ValueString() + streamWorkspaceReq, diags := NewStreamWorkspaceUpdateReq(ctx, &streamWorkspacePlan) + if diags.HasError() { + resp.Diagnostics.Append(diags...) 
+	apiResp, _, err := connV2.StreamsApi.UpdateStreamInstance(ctx, projectID, workspaceName, streamWorkspaceReq).Execute()
+	if err != nil {
+		resp.Diagnostics.AddError("error updating resource", err.Error())
+		return
+	}
+
+	newStreamWorkspaceModel, diags := NewTFStreamWorkspace(ctx, apiResp)
+	if diags.HasError() {
+		resp.Diagnostics.Append(diags...)
+		return
+	}
+	resp.Diagnostics.Append(resp.State.Set(ctx, newStreamWorkspaceModel)...)
+}
+
+func (r *streamWorkspaceRS) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+	var streamWorkspaceState *TFStreamWorkspaceModel
+	resp.Diagnostics.Append(req.State.Get(ctx, &streamWorkspaceState)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	connV2 := r.Client.AtlasV2
+	projectID := streamWorkspaceState.ProjectID.ValueString()
+	workspaceName := streamWorkspaceState.WorkspaceName.ValueString()
+	if _, err := connV2.StreamsApi.DeleteStreamInstance(ctx, projectID, workspaceName).Execute(); err != nil {
+		resp.Diagnostics.AddError("error during resource delete", err.Error())
+		return
+	}
+}
+
+func (r *streamWorkspaceRS) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	projectID, workspaceName, err := splitStreamWorkspaceImportID(req.ID)
+	if err != nil {
+		resp.Diagnostics.AddError("error splitting stream workspace import ID", err.Error())
+		return
+	}
+
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), projectID)...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("workspace_name"), workspaceName)...)
+}
+
+func splitStreamWorkspaceImportID(id string) (projectID, workspaceName string, err error) {
+	var re = regexp.MustCompile(`(?s)^([0-9a-fA-F]{24})-(.*)$`)
+	parts := re.FindStringSubmatch(id)
+
+	if len(parts) != 3 {
+		err = errors.New("use the format {project_id}-{workspace_name}")
+		return
+	}
+
+	projectID = parts[1]
+	workspaceName = parts[2]
+	return
+}
diff --git a/internal/service/streamworkspace/resource_stream_workspace_migration_test.go b/internal/service/streamworkspace/resource_stream_workspace_migration_test.go
new file mode 100644
index 0000000000..d7e1308cf5
--- /dev/null
+++ b/internal/service/streamworkspace/resource_stream_workspace_migration_test.go
@@ -0,0 +1,33 @@
+package streamworkspace_test
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig"
+)
+
+func TestMigStreamRSStreamWorkspace_basic(t *testing.T) {
+	var (
+		resourceName  = "mongodbatlas_stream_workspace.test"
+		projectID     = acc.ProjectIDExecution(t)
+		workspaceName = acc.RandomName()
+		config        = acc.StreamInstanceConfig(projectID, workspaceName, region, cloudProvider)
+	)
+	mig.SkipIfVersionBelow(t, "1.16.0") // 1.16.0 is when the resource reached GA
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acc.PreCheckBasic(t) },
+		CheckDestroy: acc.CheckDestroyStreamInstance,
+		Steps: []resource.TestStep{
+			{
+				ExternalProviders: mig.ExternalProviders(),
+				Config:            config,
+				Check:             streamWorkspaceAttributeChecks(resourceName, workspaceName, region, cloudProvider),
+			},
+			mig.TestStepCheckEmptyPlan(config),
+		},
+	})
+}
diff --git a/internal/service/streamworkspace/resource_stream_workspace_test.go b/internal/service/streamworkspace/resource_stream_workspace_test.go
new file mode 100644
index 0000000000..f557a864ad
--- /dev/null
+++ b/internal/service/streamworkspace/resource_stream_workspace_test.go
@@ -0,0 +1,104 @@
+package streamworkspace_test
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+	"github.com/hashicorp/terraform-plugin-testing/terraform"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
+)
+
+func TestAccStreamRSStreamWorkspace_basic(t *testing.T) {
+	var (
+		resourceName  = "mongodbatlas_stream_workspace.test"
+		projectID     = acc.ProjectIDExecution(t)
+		workspaceName = acc.RandomName()
+	)
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acc.PreCheckBasic(t) },
+		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
+		CheckDestroy:             acc.CheckDestroyStreamInstance,
+		Steps: []resource.TestStep{
+			{
+				Config: acc.StreamInstanceConfig(projectID, workspaceName, region, cloudProvider), // as of now there are no values that can be updated because only one region is supported
+				Check: resource.ComposeAggregateTestCheckFunc(
+					streamWorkspaceAttributeChecks(resourceName, workspaceName, region, cloudProvider),
+					resource.TestCheckResourceAttr(resourceName, "stream_config.tier", "SP30"),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportStateIdFunc: checkStreamWorkspaceImportStateIDFunc(resourceName),
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
+func TestAccStreamRSStreamWorkspace_withStreamConfig(t *testing.T) {
+	var (
+		resourceName  = "mongodbatlas_stream_workspace.test"
+		projectID     = acc.ProjectIDExecution(t)
+		workspaceName = acc.RandomName()
+	)
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acc.PreCheckBasic(t) },
+		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
+		CheckDestroy:             acc.CheckDestroyStreamInstance,
+		Steps: []resource.TestStep{
+			{
+				Config: acc.StreamInstanceWithStreamConfigConfig(projectID, workspaceName, region, cloudProvider, "SP10"), // as of now there are no values that can be updated because only one region is supported
+				Check: resource.ComposeAggregateTestCheckFunc(
+					streamWorkspaceAttributeChecks(resourceName, workspaceName, region, cloudProvider),
+					resource.TestCheckResourceAttr(resourceName, "stream_config.tier", "SP10"),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportStateIdFunc: checkStreamWorkspaceImportStateIDFunc(resourceName),
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
+func streamWorkspaceAttributeChecks(resourceName, workspaceName, region, cloudProvider string) resource.TestCheckFunc {
+	resourceChecks := []resource.TestCheckFunc{
+		checkStreamWorkspaceExists(),
+		resource.TestCheckResourceAttrSet(resourceName, "id"),
+		resource.TestCheckResourceAttrSet(resourceName, "project_id"),
+		resource.TestCheckResourceAttr(resourceName, "workspace_name", workspaceName),
+		resource.TestCheckResourceAttr(resourceName, "data_process_region.region", region),
+		resource.TestCheckResourceAttr(resourceName, "data_process_region.cloud_provider", cloudProvider),
+		resource.TestCheckResourceAttr(resourceName, "hostnames.#", "1"),
+	}
+	return resource.ComposeAggregateTestCheckFunc(resourceChecks...)
+}
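+
+// checkStreamWorkspaceImportStateIDFunc builds the {project_id}-{workspace_name} ID consumed by the ImportState test steps.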
+func checkStreamWorkspaceImportStateIDFunc(resourceName string) resource.ImportStateIdFunc {
+	return func(s *terraform.State) (string, error) {
+		rs, ok := s.RootModule().Resources[resourceName]
+		if !ok {
+			return "", fmt.Errorf("not found: %s", resourceName)
+		}
+		return fmt.Sprintf("%s-%s", rs.Primary.Attributes["project_id"], rs.Primary.Attributes["workspace_name"]), nil
+	}
+}
+
+func checkStreamWorkspaceExists() resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		for _, rs := range s.RootModule().Resources {
+			if rs.Type == "mongodbatlas_stream_workspace" {
+				_, _, err := acc.ConnV2().StreamsApi.GetStreamInstance(context.Background(), rs.Primary.Attributes["project_id"], rs.Primary.Attributes["workspace_name"]).Execute()
+				if err != nil {
+					return fmt.Errorf("stream workspace (%s:%s) does not exist", rs.Primary.Attributes["project_id"], rs.Primary.Attributes["workspace_name"])
+				}
+			}
+		}
+		return nil
+	}
+}

From 4bb28a64b018c97f0c15521f41cbd76e58bc3893 Mon Sep 17 00:00:00 2001
From: Wesley Nabo
Date: Tue, 5 Aug 2025 18:23:47 -0500
Subject: [PATCH 2/3] * make doc changes for stream_workspace

---
 docs/data-sources/stream_workspace.md  | 38 +++++++++++++++++++
 docs/data-sources/stream_workspaces.md | 48 ++++++++++++++++++++++++
 docs/resources/stream_workspace.md     | 52 ++++++++++++++++++++++++++
 internal/provider/provider.go          |  4 ++
 4 files changed, 142 insertions(+)
 create mode 100644 docs/data-sources/stream_workspace.md
 create mode 100644 docs/data-sources/stream_workspaces.md
 create mode 100644 docs/resources/stream_workspace.md

diff --git a/docs/data-sources/stream_workspace.md b/docs/data-sources/stream_workspace.md
new file mode 100644
index 0000000000..0e45d38756
--- /dev/null
+++ b/docs/data-sources/stream_workspace.md
@@ -0,0 +1,38 @@
+# Data Source: mongodbatlas_stream_workspace
+
+`mongodbatlas_stream_workspace` describes a stream workspace.
+
+## Example Usage
+
+```terraform
+data "mongodbatlas_stream_workspace" "example" {
+  project_id     = "<PROJECT_ID>"
+  workspace_name = "<WORKSPACE_NAME>"
+}
+```
+
+## Argument Reference
+
+* `project_id` - (Required) Unique 24-hexadecimal digit string that identifies your project.
+* `workspace_name` - (Required) Human-readable label that identifies the stream workspace.
+
+## Attributes Reference
+
+* `data_process_region` - Defines the cloud service provider and region where MongoDB Cloud performs stream processing. See [data process region](#data-process-region).
+* `hostnames` - List that contains the hostnames assigned to the stream workspace.
+* `stream_config` - Defines the configuration options for an Atlas Stream Processing Workspace. See [stream config](#stream-config).
+
+
+### Data Process Region
+
+* `cloud_provider` - Label that identifies the cloud service provider where MongoDB Cloud performs stream processing. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+* `region` - Name of the cloud provider region hosting Atlas Stream Processing. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+
+### Stream Config
+
+* `tier` - Selected tier for the Stream Workspace. Configures Memory / VCPU allowances. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+* `defaultTier` - Selected defaultTier for the Stream Workspace. Configures Memory / VCPU allowances. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+* `maxTierSize` - Selected maxTierSize for the Stream Workspace. Configures Memory / VCPU allowances. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+
+To learn more, see: [MongoDB Atlas API - Stream Instance](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) Documentation.
+The [Terraform Provider Examples Section](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/examples/mongodbatlas_stream_instance/atlas-streams-user-journey.md) also contains details on the overall support for Atlas Streams Processing in Terraform.
diff --git a/docs/data-sources/stream_workspaces.md b/docs/data-sources/stream_workspaces.md
new file mode 100644
index 0000000000..7dbc4e8eb2
--- /dev/null
+++ b/docs/data-sources/stream_workspaces.md
@@ -0,0 +1,48 @@
+# Data Source: mongodbatlas_stream_workspaces
+
+`mongodbatlas_stream_workspaces` describes the stream workspaces defined in a project.
+
+## Example Usage
+
+```terraform
+data "mongodbatlas_stream_workspaces" "test" {
+  project_id = "<PROJECT_ID>"
+}
+```
+
+## Argument Reference
+
+* `project_id` - (Required) Unique 24-hexadecimal digit string that identifies your project.
+
+* `page_num` - (Optional) Number of the page that displays the current set of the total objects that the response returns. Defaults to `1`.
+* `items_per_page` - (Optional) Number of items that the response returns per page, up to a maximum of `500`. Defaults to `100`.
+
+
+## Attributes Reference
+
+In addition to all arguments above, it also exports the following attributes:
+
+* `results` - A list where each element contains a Stream Workspace.
+* `total_count` - Count of the total number of items in the result set. The count might be greater than the number of objects in the results array if the entire result set is paginated.
+
+### Stream Workspace
+
+* `project_id` - Unique 24-hexadecimal digit string that identifies your project.
+* `workspace_name` - Human-readable label that identifies the stream workspace.
+* `data_process_region` - Defines the cloud service provider and region where MongoDB Cloud performs stream processing. See [data process region](#data-process-region).
+* `hostnames` - List that contains the hostnames assigned to the stream workspace.
+* `stream_config` - Defines the configuration options for an Atlas Stream Processing Workspace. See [stream config](#stream-config).
+
+### Data Process Region
+
+* `cloud_provider` - Label that identifies the cloud service provider where MongoDB Cloud performs stream processing. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+* `region` - Name of the cloud provider region hosting Atlas Stream Processing. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+
+### Stream Config
+
+* `tier` - Selected tier for the Stream Workspace. Configures Memory / VCPU allowances. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+* `defaultTier` - Selected defaultTier for the Stream Workspace. Configures Memory / VCPU allowances. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+* `maxTierSize` - Selected maxTierSize for the Stream Workspace. Configures Memory / VCPU allowances. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+
+To learn more, see: [MongoDB Atlas API - Stream Instance](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) Documentation.
+The [Terraform Provider Examples Section](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/examples/mongodbatlas_stream_instance/atlas-streams-user-journey.md) also contains details on the overall support for Atlas Streams Processing in Terraform.
diff --git a/docs/resources/stream_workspace.md b/docs/resources/stream_workspace.md
new file mode 100644
index 0000000000..88c90958f1
--- /dev/null
+++ b/docs/resources/stream_workspace.md
@@ -0,0 +1,52 @@
+# Resource: mongodbatlas_stream_workspace
+
+`mongodbatlas_stream_workspace` provides a Stream Workspace resource. The resource lets you create, edit, and delete stream workspaces in a project.
+
+## Example Usage
+
+```terraform
+resource "mongodbatlas_stream_workspace" "test" {
+  project_id     = var.project_id
+  workspace_name = "WorkspaceName"
+  data_process_region = {
+    region         = "VIRGINIA_USA"
+    cloud_provider = "AWS"
+  }
+}
+```
+
+## Argument Reference
+
+* `project_id` - (Required) Unique 24-hexadecimal digit string that identifies your project.
+* `workspace_name` - (Required) Human-readable label that identifies the stream workspace.
+* `data_process_region` - (Required) Cloud service provider and region where MongoDB Cloud performs stream processing. See [data process region](#data-process-region).
+* `stream_config` - (Optional) Configuration options for an Atlas Stream Processing Workspace. See [stream config](#stream-config).
+
+
+### Data Process Region
+
+* `cloud_provider` - (Required) Label that identifies the cloud service provider where MongoDB Cloud performs stream processing. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+* `region` - (Required) Name of the cloud provider region hosting Atlas Stream Processing. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+
+### Stream Config
+
+* `tier` - (Optional) Selected tier for the Stream Workspace. Configures Memory / VCPU allowances. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+* `defaultTier` - Selected defaultTier for the Stream Workspace. Configures Memory / VCPU allowances. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
+* `maxTierSize` - Selected maxTierSize for the Stream Workspace. Configures Memory / VCPU allowances. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) describes the valid values.
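+
+For example, a minimal sketch of a workspace pinned to an explicit tier (using `SP30`, one of the tier values exercised in this provider's acceptance tests) looks like this:
+
+```terraform
+resource "mongodbatlas_stream_workspace" "example" {
+  project_id     = var.project_id
+  workspace_name = "WorkspaceName"
+  data_process_region = {
+    region         = "VIRGINIA_USA"
+    cloud_provider = "AWS"
+  }
+  stream_config = {
+    tier = "SP30"
+  }
+}
+```
+
+Because `stream_config` is optional and computed, omitting it lets the attribute be populated from whatever tier Atlas assigns to the workspace.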
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `hostnames` - List that contains the hostnames assigned to the stream workspace.
+
+## Import
+
+You can import the stream workspace resource using the project ID and workspace name, in the format `PROJECT_ID-WORKSPACE_NAME`. For example:
+
+```
+$ terraform import mongodbatlas_stream_workspace.test 650972848269185c55f40ca1-WorkspaceName
+```
+
+To learn more, see: [MongoDB Atlas API - Stream Instance](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Streams/operation/createStreamInstance) Documentation.
+The [Terraform Provider Examples Section](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/examples/mongodbatlas_stream_instance/atlas-streams-user-journey.md) also contains details on the overall support for Atlas Streams Processing in Terraform.
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 4873ddcbf1..508e6d8b7e 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -48,6 +48,7 @@ import (
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streaminstance"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streamprivatelinkendpoint"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streamprocessor"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streamworkspace"
 	"github.com/mongodb/terraform-provider-mongodbatlas/version"
 )
@@ -444,6 +445,8 @@ func (p *MongodbtlasProvider) DataSources(context.Context) []func() datasource.D
 		projectipaddresses.DataSource,
 		streamprocessor.DataSource,
 		streamprocessor.PluralDataSource,
+		streamworkspace.DataSource,
+		streamworkspace.PluralDataSource,
 		encryptionatrest.DataSource,
 		encryptionatrestprivateendpoint.DataSource,
 		encryptionatrestprivateendpoint.PluralDataSource,
@@ -478,6 +481,7 @@ func (p *MongodbtlasProvider) Resources(context.Context) []func() resource.Resou
 		searchdeployment.Resource,
 		pushbasedlogexport.Resource,
 		streaminstance.Resource,
+		streamworkspace.Resource,
 		streamconnection.Resource,
 		streamprocessor.Resource,
 		encryptionatrestprivateendpoint.Resource,

From e729967cb1f6edb1382fd86fa5a927d0768b9bde Mon Sep 17 00:00:00 2001
From: Wesley Nabo
Date: Wed, 6 Aug 2025 10:25:53 -0500
Subject: [PATCH 3/3] * push correct changelog

---
 .changelog/3559.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .changelog/3559.txt

diff --git a/.changelog/3559.txt b/.changelog/3559.txt
new file mode 100644
index 0000000000..db7c662531
--- /dev/null
+++ b/.changelog/3559.txt
@@ -0,0 +1,3 @@
+```release-note:new-resource
+mongodbatlas_stream_workspace
+```