diff --git a/.changelog/45632.txt b/.changelog/45632.txt
new file mode 100644
index 000000000000..03522098a0e0
--- /dev/null
+++ b/.changelog/45632.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/aws_rds_integration: Add in-place update support for `integration_name` and `data_filter` arguments
+```
diff --git a/internal/service/rds/integration.go b/internal/service/rds/integration.go
index ca8b3dfe7c13..75f7cc5b3f08 100644
--- a/internal/service/rds/integration.go
+++ b/internal/service/rds/integration.go
@@ -43,6 +43,7 @@ func newIntegrationResource(_ context.Context) (resource.ResourceWithConfigure,
 	r := &integrationResource{}
 
 	r.SetDefaultCreateTimeout(60 * time.Minute)
+	r.SetDefaultUpdateTimeout(30 * time.Minute)
 	r.SetDefaultDeleteTimeout(30 * time.Minute)
 
 	return r, nil
@@ -80,16 +81,12 @@ func (r *integrationResource) Schema(ctx context.Context, request resource.Schem
 				Optional: true,
 				Computed: true,
 				PlanModifiers: []planmodifier.String{
-					stringplanmodifier.RequiresReplace(),
 					stringplanmodifier.UseStateForUnknown(),
 				},
 			},
 			names.AttrID: framework.IDAttributeDeprecatedWithAlternate(path.Root(names.AttrARN)),
 			"integration_name": schema.StringAttribute{
 				Required: true,
-				PlanModifiers: []planmodifier.String{
-					stringplanmodifier.RequiresReplace(),
-				},
 			},
 			names.AttrKMSKeyID: schema.StringAttribute{
 				Optional: true,
@@ -119,6 +116,7 @@ func (r *integrationResource) Schema(ctx context.Context, request resource.Schem
 		Blocks: map[string]schema.Block{
 			names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{
 				Create: true,
+				Update: true,
 				Delete: true,
 			}),
 		},
@@ -220,6 +218,51 @@ func (r *integrationResource) Read(ctx context.Context, request resource.ReadReq
 	response.Diagnostics.Append(response.State.Set(ctx, &data)...)
 }
 
+func (r *integrationResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) {
+	var new, old integrationResourceModel
+	response.Diagnostics.Append(request.Plan.Get(ctx, &new)...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+	response.Diagnostics.Append(request.State.Get(ctx, &old)...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+
+	conn := r.Meta().RDSClient(ctx)
+
+	if !new.DataFilter.Equal(old.DataFilter) ||
+		!new.IntegrationName.Equal(old.IntegrationName) {
+		input := &rds.ModifyIntegrationInput{
+			IntegrationIdentifier: fwflex.StringFromFramework(ctx, old.ID),
+		}
+
+		if !new.DataFilter.Equal(old.DataFilter) {
+			input.DataFilter = fwflex.StringFromFramework(ctx, new.DataFilter)
+		}
+
+		if !new.IntegrationName.Equal(old.IntegrationName) {
+			input.IntegrationName = fwflex.StringFromFramework(ctx, new.IntegrationName)
+		}
+
+		_, err := conn.ModifyIntegration(ctx, input)
+
+		if err != nil {
+			response.Diagnostics.AddError(fmt.Sprintf("updating RDS Integration (%s)", old.ID.ValueString()), err.Error())
+
+			return
+		}
+
+		if _, err := waitIntegrationUpdated(ctx, conn, old.ID.ValueString(), r.UpdateTimeout(ctx, new.Timeouts)); err != nil {
+			response.Diagnostics.AddError(fmt.Sprintf("waiting for RDS Integration (%s) update", old.ID.ValueString()), err.Error())
+
+			return
+		}
+	}
+
+	response.Diagnostics.Append(response.State.Set(ctx, &new)...)
+}
+
 func (r *integrationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) {
 	var data integrationResourceModel
 	response.Diagnostics.Append(request.State.Get(ctx, &data)...)
@@ -331,6 +374,25 @@
 	return nil, err
 }
 
+func waitIntegrationUpdated(ctx context.Context, conn *rds.Client, arn string, timeout time.Duration) (*awstypes.Integration, error) {
+	stateConf := &sdkretry.StateChangeConf{
+		Pending: []string{integrationStatusModifying},
+		Target:  []string{integrationStatusActive},
+		Refresh: statusIntegration(ctx, conn, arn),
+		Timeout: timeout,
+	}
+
+	outputRaw, err := stateConf.WaitForStateContext(ctx)
+
+	if output, ok := outputRaw.(*awstypes.Integration); ok {
+		tfresource.SetLastError(err, errors.Join(tfslices.ApplyToAll(output.Errors, integrationError)...))
+
+		return output, err
+	}
+
+	return nil, err
+}
+
 func waitIntegrationDeleted(ctx context.Context, conn *rds.Client, arn string, timeout time.Duration) (*awstypes.Integration, error) {
 	stateConf := &sdkretry.StateChangeConf{
 		Pending: []string{integrationStatusDeleting, integrationStatusActive},
diff --git a/internal/service/rds/integration_test.go b/internal/service/rds/integration_test.go
index 96c3605a08db..7b1cd8a9bd74 100644
--- a/internal/service/rds/integration_test.go
+++ b/internal/service/rds/integration_test.go
@@ -407,6 +407,86 @@ resource "aws_rds_integration" "test" {
 `, rName))
 }
 
+func TestAccRDSIntegration_update(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping long-running test in short mode")
+	}
+
+	ctx := acctest.Context(t)
+	var integration awstypes.Integration
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	rNameUpdated := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_rds_integration.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+		ErrorCheck:               acctest.ErrorCheck(t, names.RDSServiceID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		CheckDestroy:             testAccCheckIntegrationDestroy(ctx),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccIntegrationConfig_update(rName, rName, "include: *.*"),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					testAccCheckIntegrationExists(ctx, resourceName, &integration),
+					resource.TestCheckResourceAttr(resourceName, "integration_name", rName),
+					resource.TestCheckResourceAttr(resourceName, "data_filter", "include: *.*"),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+			// Update integration_name
+			{
+				Config: testAccIntegrationConfig_update(rName, rNameUpdated, "include: *.*"),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					testAccCheckIntegrationExists(ctx, resourceName, &integration),
+					resource.TestCheckResourceAttr(resourceName, "integration_name", rNameUpdated),
+					resource.TestCheckResourceAttr(resourceName, "data_filter", "include: *.*"),
+				),
+			},
+			// Update data_filter
+			{
+				Config: testAccIntegrationConfig_update(rName, rNameUpdated, "include: test.*"),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					testAccCheckIntegrationExists(ctx, resourceName, &integration),
+					resource.TestCheckResourceAttr(resourceName, "integration_name", rNameUpdated),
+					resource.TestCheckResourceAttr(resourceName, "data_filter", "include: test.*"),
+				),
+			},
+			// Update both integration_name and data_filter
+			{
+				Config: testAccIntegrationConfig_update(rName, rName, "include: mydb.*"),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					testAccCheckIntegrationExists(ctx, resourceName, &integration),
+					resource.TestCheckResourceAttr(resourceName, "integration_name", rName),
+					resource.TestCheckResourceAttr(resourceName, "data_filter", "include: mydb.*"),
+				),
+			},
+		},
+	})
+}
+
+func testAccIntegrationConfig_update(rName, integrationName, dataFilter string) string {
+	return acctest.ConfigCompose(testAccIntegrationConfig_base(rName), fmt.Sprintf(`
+resource "aws_rds_integration" "test" {
+  integration_name = %[1]q
+  source_arn       = aws_rds_cluster.test.arn
+  target_arn       = aws_redshiftserverless_namespace.test.arn
+  data_filter      = %[2]q
+
+  depends_on = [
+    aws_rds_cluster.test,
+    aws_rds_cluster_instance.test,
+    aws_redshiftserverless_namespace.test,
+    aws_redshiftserverless_workgroup.test,
+    aws_redshift_resource_policy.test,
+  ]
+}
+`, integrationName, dataFilter))
+}
+
 func testAccIntegrationConfig_optional(rName string) string {
 	return acctest.ConfigCompose(testAccIntegrationConfig_base(rName), fmt.Sprintf(`
 resource "aws_kms_key" "test" {
diff --git a/website/docs/r/rds_integration.html.markdown b/website/docs/r/rds_integration.html.markdown
index d9c47107aa30..a9935775c1d6 100644
--- a/website/docs/r/rds_integration.html.markdown
+++ b/website/docs/r/rds_integration.html.markdown
@@ -92,7 +92,7 @@ resource "aws_rds_integration" "example" {
 ```
 
 The following arguments are required:
 
-* `integration_name` - (Required, Forces new resources) Name of the integration.
+* `integration_name` - (Required) Name of the integration.
 * `source_arn` - (Required, Forces new resources) ARN of the database to use as the source for replication.
 * `target_arn` - (Required, Forces new resources) ARN of the Redshift data warehouse to use as the target for replication.
@@ -102,7 +102,7 @@ The following arguments are optional:
 * `additional_encryption_context` - (Optional, Forces new resources) Set of non-secret key–value pairs that contains additional contextual information about the data.
   For more information, see the [User Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context).
   You can only include this parameter if you specify the `kms_key_id` parameter.
-* `data_filter` - (Optional, Forces new resources) Data filters for the integration.
+* `data_filter` - (Optional) Data filters for the integration.
   These filters determine which tables from the source database are sent to the target Amazon Redshift data warehouse.
   The value should match the syntax from the AWS CLI which includes an `include:` or `exclude:` prefix before a filter expression.
   Multiple expressions are separated by a comma.
@@ -127,7 +127,7 @@ This resource exports the following attributes in addition to the arguments abov
 [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts):
 
 * `create` - (Default `60m`)
-* `update` - (Default `10m`)
+* `update` - (Default `30m`)
 * `delete` - (Default `30m`)
 
 ## Import