Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .changelog/45632.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
release-note:enhancement
resource/aws_rds_integration: Add in-place update support for `integration_name` and `data_filter` arguments
70 changes: 66 additions & 4 deletions internal/service/rds/integration.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ func newIntegrationResource(_ context.Context) (resource.ResourceWithConfigure,
r := &integrationResource{}

r.SetDefaultCreateTimeout(60 * time.Minute)
r.SetDefaultUpdateTimeout(30 * time.Minute)
r.SetDefaultDeleteTimeout(30 * time.Minute)

return r, nil
Expand Down Expand Up @@ -80,16 +81,12 @@ func (r *integrationResource) Schema(ctx context.Context, request resource.Schem
Optional: true,
Computed: true,
PlanModifiers: []planmodifier.String{
stringplanmodifier.RequiresReplace(),
stringplanmodifier.UseStateForUnknown(),
},
},
names.AttrID: framework.IDAttributeDeprecatedWithAlternate(path.Root(names.AttrARN)),
"integration_name": schema.StringAttribute{
Required: true,
PlanModifiers: []planmodifier.String{
stringplanmodifier.RequiresReplace(),
},
},
names.AttrKMSKeyID: schema.StringAttribute{
Optional: true,
Expand Down Expand Up @@ -119,6 +116,7 @@ func (r *integrationResource) Schema(ctx context.Context, request resource.Schem
Blocks: map[string]schema.Block{
names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{
Create: true,
Update: true,
Delete: true,
}),
},
Expand Down Expand Up @@ -220,6 +218,51 @@ func (r *integrationResource) Read(ctx context.Context, request resource.ReadReq
response.Diagnostics.Append(response.State.Set(ctx, &data)...)
}

// Update applies in-place changes to the integration. Only data_filter and
// integration_name are modifiable via ModifyIntegration; after issuing the
// call it waits for the integration to return to the active state before
// persisting the planned values.
func (r *integrationResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) {
	var plan, state integrationResourceModel
	response.Diagnostics.Append(request.Plan.Get(ctx, &plan)...)
	if response.Diagnostics.HasError() {
		return
	}
	response.Diagnostics.Append(request.State.Get(ctx, &state)...)
	if response.Diagnostics.HasError() {
		return
	}

	conn := r.Meta().RDSClient(ctx)

	dataFilterChanged := !plan.DataFilter.Equal(state.DataFilter)
	nameChanged := !plan.IntegrationName.Equal(state.IntegrationName)

	if dataFilterChanged || nameChanged {
		// Only send the fields that actually changed.
		input := &rds.ModifyIntegrationInput{
			IntegrationIdentifier: fwflex.StringFromFramework(ctx, state.ID),
		}
		if dataFilterChanged {
			input.DataFilter = fwflex.StringFromFramework(ctx, plan.DataFilter)
		}
		if nameChanged {
			input.IntegrationName = fwflex.StringFromFramework(ctx, plan.IntegrationName)
		}

		if _, err := conn.ModifyIntegration(ctx, input); err != nil {
			response.Diagnostics.AddError(fmt.Sprintf("updating RDS Integration (%s)", state.ID.ValueString()), err.Error())

			return
		}

		if _, err := waitIntegrationUpdated(ctx, conn, state.ID.ValueString(), r.UpdateTimeout(ctx, plan.Timeouts)); err != nil {
			response.Diagnostics.AddError(fmt.Sprintf("waiting for RDS Integration (%s) update", state.ID.ValueString()), err.Error())

			return
		}
	}

	response.Diagnostics.Append(response.State.Set(ctx, &plan)...)
}

func (r *integrationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) {
var data integrationResourceModel
response.Diagnostics.Append(request.State.Get(ctx, &data)...)
Expand Down Expand Up @@ -331,6 +374,25 @@ func waitIntegrationCreated(ctx context.Context, conn *rds.Client, arn string, t
return nil, err
}

// waitIntegrationUpdated polls the integration identified by arn until it
// leaves the modifying state and becomes active, or the timeout elapses.
// Any integration-level errors reported by the API are attached to the
// returned error via tfresource.SetLastError.
func waitIntegrationUpdated(ctx context.Context, conn *rds.Client, arn string, timeout time.Duration) (*awstypes.Integration, error) {
	stateConf := &sdkretry.StateChangeConf{
		Pending: []string{integrationStatusModifying},
		Target:  []string{integrationStatusActive},
		Refresh: statusIntegration(ctx, conn, arn),
		Timeout: timeout,
	}

	outputRaw, err := stateConf.WaitForStateContext(ctx)

	output, ok := outputRaw.(*awstypes.Integration)
	if !ok {
		return nil, err
	}

	tfresource.SetLastError(err, errors.Join(tfslices.ApplyToAll(output.Errors, integrationError)...))

	return output, err
}

func waitIntegrationDeleted(ctx context.Context, conn *rds.Client, arn string, timeout time.Duration) (*awstypes.Integration, error) {
stateConf := &sdkretry.StateChangeConf{
Pending: []string{integrationStatusDeleting, integrationStatusActive},
Expand Down
80 changes: 80 additions & 0 deletions internal/service/rds/integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -407,6 +407,86 @@ resource "aws_rds_integration" "test" {
`, rName))
}

// TestAccRDSIntegration_update verifies that integration_name and data_filter
// can be changed in place — individually and together — without forcing
// recreation of the integration.
func TestAccRDSIntegration_update(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping long-running test in short mode")
	}

	ctx := acctest.Context(t)
	var v awstypes.Integration
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	rNameUpdated := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	resourceName := "aws_rds_integration.test"

	// checks builds the per-step assertions for a given name/filter pair.
	checks := func(name, filter string) resource.TestCheckFunc {
		return resource.ComposeAggregateTestCheckFunc(
			testAccCheckIntegrationExists(ctx, resourceName, &v),
			resource.TestCheckResourceAttr(resourceName, "integration_name", name),
			resource.TestCheckResourceAttr(resourceName, "data_filter", filter),
		)
	}

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
		ErrorCheck:               acctest.ErrorCheck(t, names.RDSServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckIntegrationDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccIntegrationConfig_update(rName, rName, "include: *.*"),
				Check:  checks(rName, "include: *.*"),
			},
			{
				ResourceName:      resourceName,
				ImportState:       true,
				ImportStateVerify: true,
			},
			// Change only integration_name.
			{
				Config: testAccIntegrationConfig_update(rName, rNameUpdated, "include: *.*"),
				Check:  checks(rNameUpdated, "include: *.*"),
			},
			// Change only data_filter.
			{
				Config: testAccIntegrationConfig_update(rName, rNameUpdated, "include: test.*"),
				Check:  checks(rNameUpdated, "include: test.*"),
			},
			// Change both arguments in a single step.
			{
				Config: testAccIntegrationConfig_update(rName, rName, "include: mydb.*"),
				Check:  checks(rName, "include: mydb.*"),
			},
		},
	})
}

// testAccIntegrationConfig_update renders an aws_rds_integration resource on
// top of the shared base configuration, with the integration name and data
// filter supplied by the caller so test steps can vary them independently.
func testAccIntegrationConfig_update(rName, integrationName, dataFilter string) string {
	integration := fmt.Sprintf(`
resource "aws_rds_integration" "test" {
  integration_name = %[1]q
  source_arn       = aws_rds_cluster.test.arn
  target_arn       = aws_redshiftserverless_namespace.test.arn
  data_filter      = %[2]q

  depends_on = [
    aws_rds_cluster.test,
    aws_rds_cluster_instance.test,
    aws_redshiftserverless_namespace.test,
    aws_redshiftserverless_workgroup.test,
    aws_redshift_resource_policy.test,
  ]
}
`, integrationName, dataFilter)

	return acctest.ConfigCompose(testAccIntegrationConfig_base(rName), integration)
}

func testAccIntegrationConfig_optional(rName string) string {
return acctest.ConfigCompose(testAccIntegrationConfig_base(rName), fmt.Sprintf(`
resource "aws_kms_key" "test" {
Expand Down
6 changes: 3 additions & 3 deletions website/docs/r/rds_integration.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ resource "aws_rds_integration" "example" {

The following arguments are required:

* `integration_name` - (Required, Forces new resources) Name of the integration.
* `integration_name` - (Required) Name of the integration.
* `source_arn` - (Required, Forces new resources) ARN of the database to use as the source for replication.
* `target_arn` - (Required, Forces new resources) ARN of the Redshift data warehouse to use as the target for replication.

Expand All @@ -102,7 +102,7 @@ The following arguments are optional:
* `additional_encryption_context` - (Optional, Forces new resources) Set of non-secret key–value pairs that contains additional contextual information about the data.
For more information, see the [User Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context).
You can only include this parameter if you specify the `kms_key_id` parameter.
* `data_filter` - (Optional, Forces new resources) Data filters for the integration.
* `data_filter` - (Optional) Data filters for the integration.
These filters determine which tables from the source database are sent to the target Amazon Redshift data warehouse.
The value should match the syntax from the AWS CLI which includes an `include:` or `exclude:` prefix before a filter expression.
Multiple expressions are separated by a comma.
Expand All @@ -127,7 +127,7 @@ This resource exports the following attributes in addition to the arguments abov
[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts):

* `create` - (Default `60m`)
* `update` - (Default `10m`)
* `update` - (Default `30m`)
* `delete` - (Default `30m`)

## Import
Expand Down
Loading