diff --git a/internal/service/workspaces/exports_test.go b/internal/service/workspaces/exports_test.go index d770166e197e..e6c32abb620d 100644 --- a/internal/service/workspaces/exports_test.go +++ b/internal/service/workspaces/exports_test.go @@ -13,5 +13,6 @@ var ( FindConnectionAliasByID = findConnectionAliasByID FindDirectoryByID = findDirectoryByID FindIPGroupByID = findIPGroupByID + FindPoolByID = findPoolByID FindWorkspaceByID = findWorkspaceByID ) diff --git a/internal/service/workspaces/pool.go b/internal/service/workspaces/pool.go new file mode 100644 index 000000000000..4193f230e7ea --- /dev/null +++ b/internal/service/workspaces/pool.go @@ -0,0 +1,549 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspaces + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/workspaces" + "github.com/aws/aws-sdk-go-v2/service/workspaces/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @SDKResource("aws_workspaces_pool", name="Pool") +// @Tags(identifierAttribute="id") +func ResourcePool() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourcePoolCreate, + ReadWithoutTimeout: resourcePoolRead, + UpdateWithoutTimeout: resourcePoolUpdate, + DeleteWithoutTimeout: resourcePoolDelete, + + Importer: 
&schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "application_settings": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrS3BucketName: { + Type: schema.TypeString, + Computed: true, + }, + "settings_group": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + names.AttrStatus: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(enum.Slice(types.ApplicationSettingsStatusEnumEnabled, types.ApplicationSettingsStatusEnumDisabled), false), + }, + }, + }, + }, + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + "bundle_id": { + Type: schema.TypeString, + Required: true, + }, + "capacity": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "desired_user_sessions": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + names.AttrDescription: { + Type: schema.TypeString, + Required: true, + }, + "directory_id": { + Type: schema.TypeString, + Required: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + names.AttrState: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + "timeout_settings": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + 
"disconnect_timeout_in_seconds": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(1, 36000), + }, + "idle_disconnect_timeout_in_seconds": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(1, 36000), + }, + "max_user_duration_in_seconds": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(1, 432000), + }, + }, + }, + }, + }, + } +} + +const ( + ResNamePool = "Pool" +) + +func resourcePoolCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) + + in := &workspaces.CreateWorkspacesPoolInput{ + BundleId: aws.String(d.Get("bundle_id").(string)), + Description: aws.String(d.Get(names.AttrDescription).(string)), + DirectoryId: aws.String(d.Get("directory_id").(string)), + PoolName: aws.String(d.Get(names.AttrName).(string)), + Tags: getTagsIn(ctx), + } + if v, ok := d.GetOk("application_settings"); ok { + in.ApplicationSettings = expandApplicationSettings(v.([]any)) + } + if v, ok := d.GetOk("capacity"); ok { + in.Capacity = &types.Capacity{ + DesiredUserSessions: expandCapacity(v.([]any)).DesiredUserSessions, + } + } + if v, ok := d.GetOk("timeout_settings"); ok { + in.TimeoutSettings = expandTimeoutSettings(v.([]any)) + } + + out, err := conn.CreateWorkspacesPool(ctx, in) + if err != nil { + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionCreating, ResNamePool, d.Get(names.AttrName).(string), err) + } + + d.SetId(aws.ToString(out.WorkspacesPool.PoolId)) + + if _, err := waitPoolCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionCreating, ResNamePool, d.Get(names.AttrName).(string), err) + } + + return append(diags, resourcePoolRead(ctx, d, meta)...) 
+} + +func resourcePoolRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) + + out, err := findPoolByID(ctx, conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] WorkSpaces Pool (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionReading, ResNamePool, d.Id(), err) + } + + if err := d.Set("application_settings", flattenApplicationSettings(out.ApplicationSettings)); err != nil { + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionSetting, ResNamePool, d.Id(), err) + } + d.Set(names.AttrARN, out.PoolArn) + d.Set("bundle_id", out.BundleId) + if err := d.Set("capacity", flattenCapacity(out.CapacityStatus)); err != nil { + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionSetting, ResNamePool, d.Id(), err) + } + d.Set(names.AttrDescription, out.Description) + d.Set("directory_id", out.DirectoryId) + d.Set(names.AttrID, out.PoolId) + d.Set(names.AttrName, out.PoolName) + d.Set(names.AttrState, out.State) + if err := d.Set("timeout_settings", flattenTimeoutSettings(out.TimeoutSettings)); err != nil { + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionSetting, ResNamePool, d.Id(), err) + } + + return diags +} + +func resourcePoolUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) + + update := false // whether we need to update the pool + shouldStop := false // Check if the pool needs to be stopped before updating + currentState := d.Get(names.AttrState).(string) + + in := &workspaces.UpdateWorkspacesPoolInput{ + PoolId: aws.String(d.Id()), + } + + if d.HasChange("bundle_id") { + shouldStop = true + in.BundleId = 
aws.String(d.Get("bundle_id").(string))
+		update = true
+	}
+
+	if d.HasChange("directory_id") {
+		shouldStop = true
+		in.DirectoryId = aws.String(d.Get("directory_id").(string))
+		update = true
+	}
+
+	if d.HasChange("application_settings") {
+		in.ApplicationSettings = expandApplicationSettings(d.Get("application_settings").([]any))
+		update = true
+	}
+
+	if d.HasChange("capacity") {
+		in.Capacity = expandCapacity(d.Get("capacity").([]any)) // in.Capacity starts nil; assigning through it would panic
+		update = true
+	}
+
+	if d.HasChange("timeout_settings") {
+		timeoutSettings := expandTimeoutSettings(d.Get("timeout_settings").([]any))
+
+		old, new := d.GetChange("timeout_settings")
+		oldSettings := old.([]any)
+		newSettings := new.([]any)
+
+		if len(oldSettings) > 0 && len(newSettings) > 0 {
+			oldMap := oldSettings[0].(map[string]any)
+			newMap := newSettings[0].(map[string]any)
+
+			oldVal, oldOk := oldMap["max_user_duration_in_seconds"].(int)
+			newVal, newOk := newMap["max_user_duration_in_seconds"].(int)
+
+			if oldOk && newOk && oldVal != newVal {
+				log.Printf("[DEBUG] max_user_duration_in_seconds changed from %d to %d", oldVal, newVal)
+				shouldStop = true
+			}
+		}
+
+		in.TimeoutSettings = timeoutSettings
+		update = true
+	}
+
+	if shouldStop && currentState != string(types.WorkspacesPoolStateStopped) {
+		return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionUpdating, ResNamePool, d.Id(), fmt.Errorf("pool must be stopped to apply changes"))
+	}
+
+	if !update {
+		return diags
+	}
+
+	log.Printf("[DEBUG] Updating WorkSpaces Pool (%s)", d.Id())
+	_, err := conn.UpdateWorkspacesPool(ctx, in)
+	if err != nil {
+		return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionUpdating, ResNamePool, d.Id(), err)
+	}
+
+	if _, err := waitPoolUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil {
+		return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionUpdating, ResNamePool, d.Id(), err)
+	}
+
+	return append(diags,
resourcePoolRead(ctx, d, meta)...) +} + +func resourcePoolDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) + + log.Printf("[DEBUG] Deleting WorkSpaces Pool (%s)", d.Id()) + + pool, err := findPoolByID(ctx, conn, d.Id()) + if err != nil { + if tfresource.NotFound(err) { + return diags + } + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionReading, ResNamePool, d.Id(), err) + } + if pool.State != types.WorkspacesPoolStateStopped { + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionUpdating, ResNamePool, d.Id(), fmt.Errorf("pool must be stopped to delete")) + } + + input := &workspaces.TerminateWorkspacesPoolInput{ + PoolId: aws.String(d.Id()), + } + + if _, err := conn.TerminateWorkspacesPool(ctx, input); err != nil { + if !tfresource.NotFound(err) { + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionDeleting+" [2]", ResNamePool, d.Id(), err) + } + return diags + } + + _, err = waitPoolDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil && !tfresource.NotFound(err) { + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionDeleting+" [3]", ResNamePool, d.Id(), err) + } + + return diags +} + +func waitPoolCreated(ctx context.Context, conn *workspaces.Client, id string, timeout time.Duration) (*types.WorkspacesPool, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.WorkspacesPoolStateCreating), + Target: enum.Slice(types.WorkspacesPoolStateStopped), + Refresh: statusPool(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*types.WorkspacesPool); ok { + return out, err + } + return nil, err +} + +func waitPoolUpdated(ctx context.Context, conn *workspaces.Client, id string, timeout time.Duration) 
(*types.WorkspacesPool, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.WorkspacesPoolStateUpdating), + Target: enum.Slice(types.WorkspacesPoolStateStopped, types.WorkspacesPoolStateRunning), + Refresh: statusPool(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*types.WorkspacesPool); ok { + return out, err + } + return nil, err +} + +func waitPoolDeleted(ctx context.Context, conn *workspaces.Client, id string, timeout time.Duration) (*types.WorkspacesPool, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.WorkspacesPoolStateDeleting), + Target: []string{}, + Refresh: statusPool(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if out, ok := outputRaw.(*types.WorkspacesPool); ok { + return out, err + } + + return nil, err +} + +func statusPool(ctx context.Context, conn *workspaces.Client, id string) retry.StateRefreshFunc { + return func() (any, string, error) { + out, err := findPoolByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, string(out.State), nil + } +} + +func findPoolByID(ctx context.Context, conn *workspaces.Client, id string) (*types.WorkspacesPool, error) { + input := &workspaces.DescribeWorkspacesPoolsInput{ + PoolIds: []string{id}, + } + + output, err := conn.DescribeWorkspacesPools(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if len(output.WorkspacesPools) == 0 { + return nil, tfresource.NewEmptyResultError(input) + } + + return &output.WorkspacesPools[0], nil +} + +func findPoolByName(ctx context.Context, conn *workspaces.Client, name string) (*types.WorkspacesPool, error) { + 
input := &workspaces.DescribeWorkspacesPoolsInput{} + var result *types.WorkspacesPool + + output, err := conn.DescribeWorkspacesPools(ctx, input) + if err != nil { + return nil, err + } + + for _, pool := range output.WorkspacesPools { + if aws.ToString(pool.PoolName) == name { + result = &pool + break + } + } + + if result == nil { + return nil, &retry.NotFoundError{ + LastRequest: input, + } + } + + return result, nil +} + +func expandApplicationSettings(tfList []any) *types.ApplicationSettingsRequest { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + tfMap := tfList[0].(map[string]any) + apiObject := &types.ApplicationSettingsRequest{} + if tfMap[names.AttrStatus] != "" { + apiObject.Status = types.ApplicationSettingsStatusEnum(tfMap[names.AttrStatus].(string)) + } + if tfMap["settings_group"] != "" { + settingsGroup := tfMap["settings_group"].(string) + apiObject.SettingsGroup = &settingsGroup + } + return apiObject +} + +func expandCapacity(tfList []any) *types.Capacity { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + tfMap := tfList[0].(map[string]any) + apiObject := &types.Capacity{} + + if tfMap["desired_user_sessions"] != nil { + desiredUserSessions := int32(tfMap["desired_user_sessions"].(int)) + apiObject.DesiredUserSessions = &desiredUserSessions + } + return apiObject +} + +func expandTimeoutSettings(tfList []any) *types.TimeoutSettings { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + tfMap := tfList[0].(map[string]any) + apiObject := &types.TimeoutSettings{} + + if tfMap["disconnect_timeout_in_seconds"] != 0 { + disconnectTimeoutInSeconds := int32(tfMap["disconnect_timeout_in_seconds"].(int)) + apiObject.DisconnectTimeoutInSeconds = &disconnectTimeoutInSeconds + } + if tfMap["idle_disconnect_timeout_in_seconds"] != 0 { + idleDisconnectTimeoutInSeconds := int32(tfMap["idle_disconnect_timeout_in_seconds"].(int)) + apiObject.IdleDisconnectTimeoutInSeconds = &idleDisconnectTimeoutInSeconds + } + if 
tfMap["max_user_duration_in_seconds"] != 0 { + maxUserDurationInSeconds := int32(tfMap["max_user_duration_in_seconds"].(int)) + apiObject.MaxUserDurationInSeconds = &maxUserDurationInSeconds + } + return apiObject +} + +func flattenApplicationSettings(apiObject *types.ApplicationSettingsResponse) []any { + if apiObject == nil { + return nil + } + + m := map[string]any{ + names.AttrStatus: string(apiObject.Status), + } + + if apiObject.S3BucketName != nil { + m[names.AttrS3BucketName] = aws.ToString(apiObject.S3BucketName) + } + + if apiObject.SettingsGroup != nil { + m["settings_group"] = aws.ToString(apiObject.SettingsGroup) + } + + return []any{m} +} + +func flattenCapacity(apiObject *types.CapacityStatus) []any { + if apiObject == nil { + return nil + } + return []any{ + map[string]any{ + "desired_user_sessions": apiObject.DesiredUserSessions, + }, + } +} + +func flattenTimeoutSettings(apiObject *types.TimeoutSettings) []any { + if apiObject == nil { + return nil + } + return []any{ + map[string]any{ + "max_user_duration_in_seconds": apiObject.MaxUserDurationInSeconds, + "disconnect_timeout_in_seconds": apiObject.DisconnectTimeoutInSeconds, + "idle_disconnect_timeout_in_seconds": apiObject.IdleDisconnectTimeoutInSeconds, + }, + } +} diff --git a/internal/service/workspaces/pool_data_source.go b/internal/service/workspaces/pool_data_source.go new file mode 100644 index 000000000000..cce9de0fdaeb --- /dev/null +++ b/internal/service/workspaces/pool_data_source.go @@ -0,0 +1,174 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workspaces + +import ( + "context" + "log" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/workspaces/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @SDKDataSource("aws_workspaces_pool", name="Pool") +// @Tags(identifierAttribute="id") +func dataSourcePool() *schema.Resource { + return &schema.Resource{ + ReadWithoutTimeout: dataSourcePoolRead, + Schema: map[string]*schema.Schema{ + "application_settings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrS3BucketName: { + Type: schema.TypeString, + Computed: true, + }, + "settings_group": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrStatus: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + "bundle_id": { + Type: schema.TypeString, + Computed: true, + }, + "capacity": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "desired_user_sessions": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + names.AttrDescription: { + Type: schema.TypeString, + Computed: true, + }, + "directory_id": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ExactlyOneOf: []string{ + names.AttrID, + names.AttrName, + }, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + Optional: true, + ExactlyOneOf: []string{ + 
names.AttrID, + names.AttrName, + }, + }, + names.AttrState: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTags: tftags.TagsSchemaComputed(), + "timeout_settings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disconnect_timeout_in_seconds": { + Type: schema.TypeInt, + Computed: true, + }, + "idle_disconnect_timeout_in_seconds": { + Type: schema.TypeInt, + Computed: true, + }, + "max_user_duration_in_seconds": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + }, + } +} + +const ( + DSNamePool = "Pool Data Source" +) + +func dataSourcePoolRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) + + var out *types.WorkspacesPool + var err error + + if v, ok := d.GetOk(names.AttrID); ok { + poolID := v.(string) + out, err = findPoolByID(ctx, conn, poolID) + d.SetId(poolID) + } else if v, ok := d.GetOk(names.AttrName); ok { + poolName := v.(string) + out, err = findPoolByName(ctx, conn, poolName) + + if out != nil { + d.SetId(aws.ToString(out.PoolId)) + } + } + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] WorkSpaces Pool (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionReading, ResNamePool, d.Id(), err) + } + + if err := d.Set("application_settings", flattenApplicationSettings(out.ApplicationSettings)); err != nil { + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionSetting, ResNamePool, d.Id(), err) + } + d.Set(names.AttrARN, out.PoolArn) + d.Set("bundle_id", out.BundleId) + if err := d.Set("capacity", flattenCapacity(out.CapacityStatus)); err != nil { + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionSetting, ResNamePool, d.Id(), err) + } + d.Set(names.AttrDescription, 
out.Description) + d.Set("directory_id", out.DirectoryId) + d.Set(names.AttrID, out.PoolId) + d.Set(names.AttrName, out.PoolName) + d.Set(names.AttrState, out.State) + if err := d.Set("timeout_settings", flattenTimeoutSettings(out.TimeoutSettings)); err != nil { + return create.AppendDiagError(diags, names.WorkSpaces, create.ErrActionSetting, ResNamePool, d.Id(), err) + } + + return diags +} diff --git a/internal/service/workspaces/pool_data_source_test.go b/internal/service/workspaces/pool_data_source_test.go new file mode 100644 index 000000000000..ade909f15d5b --- /dev/null +++ b/internal/service/workspaces/pool_data_source_test.go @@ -0,0 +1,90 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspaces_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/workspaces" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccWorkSpacesPoolDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + var pool workspaces.DescribeWorkspacesPoolsOutput + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + datasourceName := "data.aws_workspaces_pool.test" + resourceName := "aws_workspaces_pool.test" + resourceBundleName := "data.aws_workspaces_bundle.standard" + resourceDirectory := "aws_workspaces_directory.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, strings.ToLower(workspaces.ServiceID)) + testAccPreCheckPool(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, strings.ToLower(workspaces.ServiceID)), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPoolDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
testAccPoolDataSourceConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPoolExists(ctx, datasourceName, &pool), + resource.TestCheckResourceAttrPair(datasourceName, names.AttrARN, resourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(datasourceName, "bundle_id", resourceBundleName, names.AttrID), + resource.TestCheckResourceAttr(datasourceName, "capacity.0.desired_user_sessions", "1"), + resource.TestCheckResourceAttr(datasourceName, names.AttrDescription, rName), + resource.TestCheckResourceAttrPair(datasourceName, "directory_id", resourceDirectory, "directory_id"), + resource.TestCheckResourceAttrSet(datasourceName, names.AttrID), + resource.TestCheckResourceAttr(datasourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(datasourceName, names.AttrState), + resource.TestCheckResourceAttr(datasourceName, "application_settings.#", "1"), + resource.TestCheckResourceAttr(datasourceName, "application_settings.0.status", "ENABLED"), + resource.TestCheckResourceAttr(datasourceName, "application_settings.0.settings_group", "test"), + resource.TestCheckResourceAttr(datasourceName, "timeout_settings.#", "1"), + resource.TestCheckResourceAttr(datasourceName, "timeout_settings.0.disconnect_timeout_in_seconds", "2000"), + resource.TestCheckResourceAttr(datasourceName, "timeout_settings.0.idle_disconnect_timeout_in_seconds", "2000"), + resource.TestCheckResourceAttr(datasourceName, "timeout_settings.0.max_user_duration_in_seconds", "2000"), + ), + }, + }, + }) +} + +func testAccPoolDataSourceConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccPoolConfig_base(rName), + fmt.Sprintf(` +resource "aws_workspaces_pool" "test" { + application_settings { + status = "ENABLED" + settings_group = "test" + } + bundle_id = data.aws_workspaces_bundle.standard.id + capacity { + desired_user_sessions = 1 + } + description = %[1]q + directory_id = aws_workspaces_directory.test.directory_id + name = %[1]q + 
timeout_settings { + disconnect_timeout_in_seconds = 2000 + idle_disconnect_timeout_in_seconds = 2000 + max_user_duration_in_seconds = 2000 + } +} + +data "aws_workspaces_pool" "test" { + id = aws_workspaces_pool.test.id +} +`, rName)) +} diff --git a/internal/service/workspaces/pool_test.go b/internal/service/workspaces/pool_test.go new file mode 100644 index 000000000000..26015f0e5a14 --- /dev/null +++ b/internal/service/workspaces/pool_test.go @@ -0,0 +1,385 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspaces_test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/service/workspaces" + "github.com/aws/aws-sdk-go-v2/service/workspaces/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfworkspaces "github.com/hashicorp/terraform-provider-aws/internal/service/workspaces" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccWorkSpacesPool_basic(t *testing.T) { + ctx := acctest.Context(t) + var pool workspaces.DescribeWorkspacesPoolsOutput + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_workspaces_pool.test" + resourceBundleName := "data.aws_workspaces_bundle.standard" + resourceDirectory := "aws_workspaces_directory.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, strings.ToLower(workspaces.ServiceID)) + testAccPreCheckPool(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, 
strings.ToLower(workspaces.ServiceID)), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPoolDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPoolConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPoolExists(ctx, resourceName, &pool), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "workspaces", regexache.MustCompile(`workspacespool/wspool-[0-9a-z]+`)), + resource.TestCheckResourceAttrPair(resourceName, "bundle_id", resourceBundleName, names.AttrID), + resource.TestCheckResourceAttr(resourceName, "capacity.0.desired_user_sessions", "1"), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, rName), + resource.TestCheckResourceAttrPair(resourceName, "directory_id", resourceDirectory, "directory_id"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrID), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(resourceName, names.AttrState), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrApplyImmediately, "user"}, + }, + }, + }) +} + +func testAccWorkSpacesPool_disappears(t *testing.T) { + ctx := acctest.Context(t) + + var pool workspaces.DescribeWorkspacesPoolsOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_workspaces_pool.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, strings.ToLower(workspaces.ServiceID)) + testAccPreCheckPool(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, strings.ToLower(workspaces.ServiceID)), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPoolDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPoolConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckPoolExists(ctx, resourceName, &pool), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfworkspaces.ResourcePool(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckPoolDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_workspaces_pool" { + continue + } + _, err := tfworkspaces.FindPoolByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.WorkSpaces, create.ErrActionCheckingDestroyed, tfworkspaces.ResNamePool, rs.Primary.ID, err) + } + + return create.Error(names.WorkSpaces, create.ErrActionCheckingDestroyed, tfworkspaces.ResNamePool, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckPoolExists(ctx context.Context, name string, pool *workspaces.DescribeWorkspacesPoolsOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.WorkSpaces, create.ErrActionCheckingExistence, tfworkspaces.ResNamePool, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.WorkSpaces, create.ErrActionCheckingExistence, tfworkspaces.ResNamePool, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesClient(ctx) + + resp, err := tfworkspaces.FindPoolByID(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.WorkSpaces, create.ErrActionCheckingExistence, tfworkspaces.ResNamePool, rs.Primary.ID, err) + } + + *pool = workspaces.DescribeWorkspacesPoolsOutput{ + WorkspacesPools: []types.WorkspacesPool{*resp}, + } + + return nil + } +} + +func testAccPreCheckPool(ctx context.Context, t *testing.T) { + conn := 
acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesClient(ctx) + + input := &workspaces.DescribeWorkspacesPoolsInput{} + + _, err := conn.DescribeWorkspacesPools(ctx, input) + + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func testAccPool_ApplicationSettings(t *testing.T) { + ctx := acctest.Context(t) + var pool workspaces.DescribeWorkspacesPoolsOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_workspaces_pool.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, strings.ToLower(workspaces.ServiceID)) + testAccPreCheckPool(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, strings.ToLower(workspaces.ServiceID)), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPoolDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPoolConfig_ApplicationSettings(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPoolExists(ctx, resourceName, &pool), + resource.TestCheckResourceAttr(resourceName, "application_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "application_settings.0.status", "ENABLED"), + resource.TestCheckResourceAttr(resourceName, "application_settings.0.settings_group", "test"), + ), + }, + }, + }) +} + +func testAccPool_TimeoutSettings(t *testing.T) { + ctx := acctest.Context(t) + var pool workspaces.DescribeWorkspacesPoolsOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_workspaces_pool.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, strings.ToLower(workspaces.ServiceID)) + testAccPreCheckPool(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, strings.ToLower(workspaces.ServiceID)), + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPoolDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPoolConfig_TimeoutSettings(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPoolExists(ctx, resourceName, &pool), + resource.TestCheckResourceAttr(resourceName, "timeout_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "timeout_settings.0.disconnect_timeout_in_seconds", "2000"), + resource.TestCheckResourceAttr(resourceName, "timeout_settings.0.idle_disconnect_timeout_in_seconds", "2000"), + resource.TestCheckResourceAttr(resourceName, "timeout_settings.0.max_user_duration_in_seconds", "2000"), + ), + }, + }, + }) +} + +func testAccPool_TimeoutSettings_MaxUserDurationInSeconds(t *testing.T) { + ctx := acctest.Context(t) + var pool workspaces.DescribeWorkspacesPoolsOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_workspaces_pool.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, strings.ToLower(workspaces.ServiceID)) + testAccPreCheckPool(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, strings.ToLower(workspaces.ServiceID)), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPoolDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPoolConfig_TimeoutSettings_MaxUserDurationInSeconds(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPoolExists(ctx, resourceName, &pool), + resource.TestCheckResourceAttr(resourceName, "timeout_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "timeout_settings.0.max_user_duration_in_seconds", "2000"), + ), + }, + }, + }) +} + +func testAccPoolConfig_base(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigAvailableAZsNoOptIn(), + //lintignore:AWSAT003 + fmt.Sprintf(` +data "aws_region" "current" {} +data 
"aws_caller_identity" "current" {} +data "aws_partition" "current" {} + +data "aws_workspaces_bundle" "standard" { + owner = "AMAZON" + name = "Standard with Windows 10 (Server 2022 based) (WSP)" +} + +locals { + region_workspaces_az_ids = { + "us-east-1" = formatlist("use1-az%%d", [2, 4, 6]) + } + + workspaces_az_ids = lookup(local.region_workspaces_az_ids, data.aws_region.current.name, data.aws_availability_zones.available.zone_ids) +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "primary" { + vpc_id = aws_vpc.test.id + availability_zone_id = local.workspaces_az_ids[0] + cidr_block = "10.0.1.0/24" + + tags = { + Name = "%[1]s-primary" + } +} + +resource "aws_subnet" "secondary" { + vpc_id = aws_vpc.test.id + availability_zone_id = local.workspaces_az_ids[1] + cidr_block = "10.0.2.0/24" + + tags = { + Name = "%[1]s-secondary" + } +} + +resource "aws_workspaces_directory" "test" { + subnet_ids = [aws_subnet.primary.id, aws_subnet.secondary.id] + workspace_type = "POOLS" + workspace_directory_name = %[1]q + workspace_directory_description = %[1]q + user_identity_type = "CUSTOMER_MANAGED" + + tags = { + Name = %[1]q + } +} +`, rName)) +} + +func testAccPoolConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccPoolConfig_base(rName), + fmt.Sprintf(` +resource "aws_workspaces_pool" "test" { + bundle_id = data.aws_workspaces_bundle.standard.id + capacity { + desired_user_sessions = 1 + } + description = %[1]q + directory_id = aws_workspaces_directory.test.directory_id + name = %[1]q +} +`, rName)) +} + +func testAccPoolConfig_ApplicationSettings(rName string) string { + return acctest.ConfigCompose( + testAccPoolConfig_base(rName), + fmt.Sprintf(` +resource "aws_workspaces_pool" "test" { + application_settings { + status = "ENABLED" + settings_group = "test" + } + bundle_id = data.aws_workspaces_bundle.standard.id + capacity { + desired_user_sessions = 1 + } + description = 
%[1]q + directory_id = aws_workspaces_directory.test.directory_id + name = %[1]q +} +`, rName)) +} + +func testAccPoolConfig_TimeoutSettings(rName string) string { + return acctest.ConfigCompose( + testAccPoolConfig_base(rName), + fmt.Sprintf(` +resource "aws_workspaces_pool" "test" { + bundle_id = data.aws_workspaces_bundle.standard.id + capacity { + desired_user_sessions = 1 + } + description = %[1]q + directory_id = aws_workspaces_directory.test.directory_id + name = %[1]q + timeout_settings { + disconnect_timeout_in_seconds = 2000 + idle_disconnect_timeout_in_seconds = 2000 + max_user_duration_in_seconds = 2000 + } +} +`, rName)) +} + +func testAccPoolConfig_TimeoutSettings_MaxUserDurationInSeconds(rName string) string { + return acctest.ConfigCompose( + testAccPoolConfig_base(rName), + fmt.Sprintf(` +resource "aws_workspaces_pool" "test" { + bundle_id = data.aws_workspaces_bundle.standard.id + capacity { + desired_user_sessions = 1 + } + description = %[1]q + directory_id = aws_workspaces_directory.test.directory_id + name = %[1]q + timeout_settings { + max_user_duration_in_seconds = 2000 + } +} +`, rName)) +} diff --git a/internal/service/workspaces/service_package_gen.go b/internal/service/workspaces/service_package_gen.go index 09a82280d487..a0234d7636d3 100644 --- a/internal/service/workspaces/service_package_gen.go +++ b/internal/service/workspaces/service_package_gen.go @@ -58,6 +58,15 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*inttypes.Service Name: "Image", Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: dataSourcePool, + TypeName: "aws_workspaces_pool", + Name: "Pool", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrID, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: dataSourceWorkspace, TypeName: "aws_workspaces_workspace", @@ -90,6 +99,15 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa }), 
Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: ResourcePool, + TypeName: "aws_workspaces_pool", + Name: "Pool", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrID, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: resourceWorkspace, TypeName: "aws_workspaces_workspace", diff --git a/internal/service/workspaces/workspaces_data_source_test.go b/internal/service/workspaces/workspaces_data_source_test.go index c78ddd8ef652..a8b2e3261008 100644 --- a/internal/service/workspaces/workspaces_data_source_test.go +++ b/internal/service/workspaces/workspaces_data_source_test.go @@ -26,6 +26,9 @@ func TestAccWorkSpacesDataSource_serial(t *testing.T) { "Image": { acctest.CtBasic: testAccImageDataSource_basic, }, + "Pool": { + acctest.CtBasic: testAccWorkSpacesPoolDataSource_basic, + }, "Workspace": { "byWorkspaceID": testAccWorkspaceDataSource_byWorkspaceID, "byDirectoryID_userName": testAccWorkspaceDataSource_byDirectoryID_userName, diff --git a/internal/service/workspaces/workspaces_test.go b/internal/service/workspaces/workspaces_test.go index 0494ba6a0db6..196ea7d547c4 100644 --- a/internal/service/workspaces/workspaces_test.go +++ b/internal/service/workspaces/workspaces_test.go @@ -36,6 +36,13 @@ func TestAccWorkSpaces_serial(t *testing.T) { "multipleDirectories": testAccIPGroup_MultipleDirectories, "tags": testAccIPGroup_tags, }, + "Pool": { + acctest.CtBasic: testAccWorkSpacesPool_basic, + acctest.CtDisappears: testAccWorkSpacesPool_disappears, + "ApplicationSettings": testAccPool_ApplicationSettings, + "TimeoutSettings": testAccPool_TimeoutSettings, + "TimeoutSettings_maxUserDurationInSeconds": testAccPool_TimeoutSettings_MaxUserDurationInSeconds, + }, "Workspace": { acctest.CtBasic: testAccWorkspace_basic, "recreate": testAccWorkspace_recreate, diff --git a/website/docs/d/workspaces_pool.html.markdown b/website/docs/d/workspaces_pool.html.markdown new file mode 100644 
index 000000000000..8b56537917df
--- /dev/null
+++ b/website/docs/d/workspaces_pool.html.markdown
@@ -0,0 +1,57 @@
+---
+subcategory: "WorkSpaces"
+layout: "aws"
+page_title: "AWS: aws_workspaces_pool"
+description: |-
+  Terraform data source for retrieving information about an AWS WorkSpaces Pool.
+---
+# Data Source: aws_workspaces_pool

+Terraform data source for retrieving information about an AWS WorkSpaces Pool.
+
+## Example Usage
+
+### Basic Usage with ID
+
+```terraform
+data "aws_workspaces_pool" "example" {
+  id = "wspool-12345678"
+}
+```
+
+### Basic Usage with Name
+
+```terraform
+data "aws_workspaces_pool" "example" {
+  name = "example-pool"
+}
+```
+
+## Argument Reference
+
+This data source supports the following arguments:
+
+* `id` - (Optional) ID of the WorkSpaces Pool.
+* `name` - (Optional) Name of the WorkSpaces Pool.
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference)
+
+## Attribute Reference
+
+This data source exports the following attributes in addition to the arguments above:
+
+* `application_settings` - Information about the application settings for the WorkSpaces Pool.
+  * `s3_bucket_name` - S3 bucket name for the application settings.
+  * `settings_group` - Name of the settings group for the application settings.
+  * `status` - Status of the application settings.
+* `arn` - ARN of the WorkSpaces Pool.
+* `bundle_id` - ID of the bundle for the WorkSpaces Pool.
+* `capacity` - Information about the capacity of the WorkSpaces Pool.
+  * `desired_user_sessions` - Desired number of user sessions for the WorkSpaces Pool.
+* `description` - Description of the WorkSpaces Pool.
+* `directory_id` - ID of the directory for the WorkSpaces Pool.
+* `state` - Current state of the WorkSpaces Pool.
+* `tags` - Map of tags assigned to the resource.
+* `timeout_settings` - Information about the timeout settings for the WorkSpaces Pool. + * `disconnect_timeout_in_seconds` - Time after disconnection when a user is logged out of their WorkSpace. + * `idle_disconnect_timeout_in_seconds` - Time after inactivity when a user is disconnected from their WorkSpace. + * `max_user_duration_in_seconds` - Maximum time that a user can be connected to their WorkSpace. diff --git a/website/docs/r/workspaces_pool.html.markdown b/website/docs/r/workspaces_pool.html.markdown new file mode 100644 index 000000000000..acbe717b339c --- /dev/null +++ b/website/docs/r/workspaces_pool.html.markdown @@ -0,0 +1,150 @@ +--- +subcategory: "WorkSpaces" +layout: "aws" +page_title: "AWS: aws_workspaces_pool" +description: |- + Terraform resource for managing an AWS WorkSpaces Pool. +--- +# Resource: aws_workspaces_pool + +Provides a WorkSpaces Pool in AWS WorkSpaces Service. + +## Example Usage + +### Basic Usage + +```terraform +data "aws_workspaces_bundle" "example" { + owner = "AMAZON" + name = "Standard with Windows 10 (Server 2022 based) (WSP)" +} + +resource "aws_workspaces_directory" "example" { + subnet_ids = [ + aws_subnet.example_c.id, + aws_subnet.example_d.id + ] + workspace_type = "POOLS" + workspace_directory_name = "example-directory" + workspace_directory_description = "Example WorkSpaces Directory for Pools" + user_identity_type = "CUSTOMER_MANAGED" +} + +resource "aws_workspaces_pool" "example" { + bundle_id = data.aws_workspaces_bundle.example.id + name = "example-pool" + description = "Example WorkSpaces Pool" + directory_id = aws_workspaces_directory.example.directory_id + + capacity { + desired_user_sessions = 10 + } +} +``` + +### With Application Settings + +```terraform +resource "aws_workspaces_pool" "example" { + bundle_id = data.aws_workspaces_bundle.example.id + name = "example-pool" + description = "Example WorkSpaces Pool with Application Settings" + directory_id = aws_workspaces_directory.example.directory_id + 
+ capacity { + desired_user_sessions = 10 + } + + application_settings { + status = "ENABLED" + settings_group = "my-settings-group" + } +} +``` + +### With Timeout Settings + +```terraform +resource "aws_workspaces_pool" "example" { + bundle_id = data.aws_workspaces_bundle.example.id + name = "example-pool" + description = "Example WorkSpaces Pool with Timeout Settings" + directory_id = aws_workspaces_directory.example.directory_id + + capacity { + desired_user_sessions = 10 + } + + timeout_settings { + disconnect_timeout_in_seconds = 900 + idle_disconnect_timeout_in_seconds = 900 + max_user_duration_in_seconds = 14400 + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `bundle_id` - (Required) ID of the bundle for the WorkSpaces Pool. +* `capacity` - (Required) Information about the capacity of the WorkSpaces Pool. Defined below. +* `description` - (Required) Description of the WorkSpaces Pool. +* `directory_id` - (Required) ID of the directory for the WorkSpaces Pool. +* `name` - (Required) Name of the WorkSpaces Pool. This cannot be changed after creation. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference) +* `application_settings` - (Optional) Information about the application settings for the WorkSpaces Pool. Defined below. +* `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `timeout_settings` - (Optional) Information about the timeout settings for the WorkSpaces Pool. Defined below. 
+ +### capacity + +* `desired_user_sessions` - (Required) The desired number of user sessions for the WorkSpaces Pool. + +### application_settings + +* `settings_group` - (Optional) The name of the settings group for the application settings. +* `status` - (Required) The status of the application settings. Valid values are `ENABLED` and `DISABLED`. + +### timeout_settings + +* `disconnect_timeout_in_seconds` - (Optional) The time after disconnection when a user is logged out of their WorkSpace. Must be between 1 and 36000. +* `idle_disconnect_timeout_in_seconds` - (Optional) The time after inactivity when a user is disconnected from their WorkSpace. Must be between 1 and 36000. +* `max_user_duration_in_seconds` - (Optional) The maximum time that a user can be connected to their WorkSpace. Must be between 1 and 432000. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the WorkSpaces Pool. +* `id` - ID of the WorkSpaces Pool. +* `state` - Current state of the WorkSpaces Pool. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `5 minutes`) +* `update` - (Default `5 minutes`) +* `delete` - (Default `5 minutes`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Pool using the pool ID. For example: + +```terraform +import { + to = aws_workspaces_pool.example + id = "wspool-12345678" +} +``` + +Using `terraform import`, import WorkSpaces Pool using the pool ID. For example: + +```console +% terraform import aws_workspaces_pool.example wspool-12345678 +```