Add Performance Insights options to aws_rds_cluster resource #29415

Merged
3 changes: 3 additions & 0 deletions .changelog/29415.txt
@@ -0,0 +1,3 @@
```release-note:enhancement
resource/aws_rds_cluster: Add `performance_insights_enabled`, `performance_insights_kms_key_id`, and `performance_insights_retention_period` arguments
```
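
For context, a minimal configuration sketch exercising the three new arguments is shown below. It mirrors the acceptance test configurations later in this diff; the engine, instance class, KMS key, and retention value are illustrative placeholders, not a prescribed setup.

```hcl
# Hypothetical example: enable Performance Insights on a provisioned
# Multi-AZ DB cluster with a customer-managed KMS key.
resource "aws_kms_key" "example" {
  description             = "Performance Insights example key"
  deletion_window_in_days = 7
}

resource "aws_rds_cluster" "example" {
  cluster_identifier        = "pi-example"
  engine                    = "mysql"
  db_cluster_instance_class = "db.m6gd.large"
  storage_type              = "io1"
  allocated_storage         = 100
  iops                      = 1000
  master_username           = "exampleuser"
  master_password           = "avoid-plaintext-passwords"
  skip_final_snapshot       = true

  # Arguments added by this PR.
  performance_insights_enabled          = true
  performance_insights_kms_key_id       = aws_kms_key.example.arn
  performance_insights_retention_period = 62
}
```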
43 changes: 43 additions & 0 deletions internal/service/rds/cluster.go
@@ -348,6 +348,21 @@ func resourceCluster() *schema.Resource {
Computed: true,
ValidateFunc: validation.StringInSlice(NetworkType_Values(), false),
},
"performance_insights_enabled": {
Type: schema.TypeBool,
Optional: true,
},
"performance_insights_kms_key_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: verify.ValidARN,
},
"performance_insights_retention_period": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
names.AttrPort: {
Type: schema.TypeInt,
Optional: true,
@@ -1133,6 +1148,18 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int
input.NetworkType = aws.String(v.(string))
}

if v, ok := d.GetOk("performance_insights_enabled"); ok {
input.EnablePerformanceInsights = aws.Bool(v.(bool))
}

if v, ok := d.GetOk("performance_insights_kms_key_id"); ok {
input.PerformanceInsightsKMSKeyId = aws.String(v.(string))
}

if v, ok := d.GetOk("performance_insights_retention_period"); ok {
input.PerformanceInsightsRetentionPeriod = aws.Int64(int64(v.(int)))
}

if v, ok := d.GetOk(names.AttrPort); ok {
input.Port = aws.Int64(int64(v.(int)))
}
@@ -1304,6 +1331,9 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter
}
d.Set("master_username", dbc.MasterUsername)
d.Set("network_type", dbc.NetworkType)
d.Set("performance_insights_enabled", dbc.PerformanceInsightsEnabled)
d.Set("performance_insights_kms_key_id", dbc.PerformanceInsightsKMSKeyId)
d.Set("performance_insights_retention_period", dbc.PerformanceInsightsRetentionPeriod)
d.Set(names.AttrPort, dbc.Port)
d.Set("preferred_backup_window", dbc.PreferredBackupWindow)
d.Set(names.AttrPreferredMaintenanceWindow, dbc.PreferredMaintenanceWindow)
@@ -1480,6 +1510,18 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int
input.NetworkType = aws.String(d.Get("network_type").(string))
}

if d.HasChange("performance_insights_enabled") {
input.EnablePerformanceInsights = aws.Bool(d.Get("performance_insights_enabled").(bool))
}

if d.HasChange("performance_insights_kms_key_id") {
input.PerformanceInsightsKMSKeyId = aws.String(d.Get("performance_insights_kms_key_id").(string))
}

if d.HasChange("performance_insights_retention_period") {
input.PerformanceInsightsRetentionPeriod = aws.Int64(int64(d.Get("performance_insights_retention_period").(int)))
}

if d.HasChange(names.AttrPort) {
input.Port = aws.Int64(int64(d.Get(names.AttrPort).(int)))
}
@@ -1870,6 +1912,7 @@ func waitDBClusterUpdated(ctx context.Context, conn *rds.RDS, id string, waitNoP
pendingStatuses := []string{
clusterStatusBackingUp,
clusterStatusConfiguringIAMDatabaseAuth,
clusterStatusConfiguringEnhancedMonitoring,
clusterStatusModifying,
clusterStatusRenaming,
clusterStatusResettingMasterCredentials,
147 changes: 147 additions & 0 deletions internal/service/rds/cluster_test.go
@@ -2705,6 +2705,95 @@ func TestAccRDSCluster_engineLifecycleSupport_disabled(t *testing.T) {
})
}

func TestAccRDSCluster_performanceInsightsEnabled(t *testing.T) {
ctx := acctest.Context(t)
if testing.Short() {
t.Skip("skipping long-running test in short mode")
}

var dbCluster rds.DBCluster
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
resourceName := "aws_rds_cluster.test"

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acctest.PreCheck(ctx, t) },
ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID),
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
CheckDestroy: testAccCheckClusterDestroy(ctx),
Steps: []resource.TestStep{
{
Config: testAccClusterConfig_performanceInsightsEnabled(rName, true),
Check: resource.ComposeTestCheckFunc(
testAccCheckClusterExists(ctx, resourceName, &dbCluster),
resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtTrue),
),
},
{
Config: testAccClusterConfig_performanceInsightsEnabled(rName, false),
Check: resource.ComposeTestCheckFunc(
testAccCheckClusterExists(ctx, resourceName, &dbCluster),
resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtFalse),
),
},
},
})
}

func TestAccRDSCluster_performanceInsightsKMSKeyID(t *testing.T) {
ctx := acctest.Context(t)
if testing.Short() {
t.Skip("skipping long-running test in short mode")
}

var dbCluster rds.DBCluster
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
resourceName := "aws_rds_cluster.test"
kmsKeyResourceName := "aws_kms_key.test"

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acctest.PreCheck(ctx, t) },
ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID),
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
CheckDestroy: testAccCheckClusterDestroy(ctx),
Steps: []resource.TestStep{
{
Config: testAccClusterConfig_performanceInsightsKMSKeyID(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckClusterExists(ctx, resourceName, &dbCluster),
resource.TestCheckResourceAttrPair(resourceName, "performance_insights_kms_key_id", kmsKeyResourceName, names.AttrARN),
),
},
},
})
}

func TestAccRDSCluster_performanceInsightsRetentionPeriod(t *testing.T) {
ctx := acctest.Context(t)
if testing.Short() {
t.Skip("skipping long-running test in short mode")
}

var dbCluster rds.DBCluster
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
resourceName := "aws_rds_cluster.test"

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acctest.PreCheck(ctx, t) },
ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID),
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
CheckDestroy: testAccCheckClusterDestroy(ctx),
Steps: []resource.TestStep{
{
Config: testAccClusterConfig_performanceInsightsRetentionPeriod(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckClusterExists(ctx, resourceName, &dbCluster),
resource.TestCheckResourceAttr(resourceName, "performance_insights_retention_period", "62"),
),
},
},
})
}

func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc {
return func(s *terraform.State) error {
return testAccCheckClusterDestroyWithProvider(ctx)(s, acctest.Provider)
@@ -5379,3 +5468,61 @@ resource "aws_rds_cluster" "test" {
}
`, rName, tfrds.ClusterEngineAuroraPostgreSQL)
}

func testAccClusterConfig_performanceInsightsEnabled(rName string, performanceInsightsEnabled bool) string {
return fmt.Sprintf(`
resource "aws_rds_cluster" "test" {
cluster_identifier = %[1]q
engine = %[3]q
db_cluster_instance_class = "db.m6gd.large"
storage_type = "io1"
allocated_storage = 100
iops = 1000
master_username = "tfacctest"
master_password = "avoid-plaintext-passwords"
skip_final_snapshot = true
performance_insights_enabled = %[2]t
}
`, rName, performanceInsightsEnabled, tfrds.ClusterEngineMySQL)
}

func testAccClusterConfig_performanceInsightsKMSKeyID(rName string) string {
return fmt.Sprintf(`
resource "aws_kms_key" "test" {
description = %[1]q
deletion_window_in_days = 7
}

resource "aws_rds_cluster" "test" {
cluster_identifier = %[1]q
engine = %[2]q
db_cluster_instance_class = "db.m6gd.large"
storage_type = "io1"
allocated_storage = 100
iops = 1000
master_username = "tfacctest"
master_password = "avoid-plaintext-passwords"
skip_final_snapshot = true
performance_insights_enabled = true
performance_insights_kms_key_id = aws_kms_key.test.arn
}
`, rName, tfrds.ClusterEngineMySQL)
}

func testAccClusterConfig_performanceInsightsRetentionPeriod(rName string) string {
return fmt.Sprintf(`
resource "aws_rds_cluster" "test" {
cluster_identifier = %[1]q
engine = %[2]q
db_cluster_instance_class = "db.m6gd.large"
storage_type = "io1"
allocated_storage = 100
iops = 1000
master_username = "tfacctest"
master_password = "avoid-plaintext-passwords"
skip_final_snapshot = true
performance_insights_enabled = true
performance_insights_retention_period = 62
}
`, rName, tfrds.ClusterEngineMySQL)
}
29 changes: 15 additions & 14 deletions internal/service/rds/consts.go
@@ -16,20 +16,21 @@ const (
)

const (
clusterStatusAvailable = "available"
clusterStatusBackingUp = "backing-up"
clusterStatusConfiguringIAMDatabaseAuth = "configuring-iam-database-auth"
clusterStatusCreating = "creating"
clusterStatusDeleting = "deleting"
clusterStatusMigrating = "migrating"
clusterStatusModifying = "modifying"
clusterStatusPreparingDataMigration = "preparing-data-migration"
clusterStatusPromoting = "promoting"
clusterStatusRebooting = "rebooting"
clusterStatusRenaming = "renaming"
clusterStatusResettingMasterCredentials = "resetting-master-credentials"
clusterStatusScalingCompute = "scaling-compute"
clusterStatusUpgrading = "upgrading"
clusterStatusAvailable = "available"
clusterStatusBackingUp = "backing-up"
clusterStatusConfiguringEnhancedMonitoring = "configuring-enhanced-monitoring"
clusterStatusConfiguringIAMDatabaseAuth = "configuring-iam-database-auth"
clusterStatusCreating = "creating"
clusterStatusDeleting = "deleting"
clusterStatusMigrating = "migrating"
clusterStatusModifying = "modifying"
clusterStatusPreparingDataMigration = "preparing-data-migration"
clusterStatusPromoting = "promoting"
clusterStatusRebooting = "rebooting"
clusterStatusRenaming = "renaming"
clusterStatusResettingMasterCredentials = "resetting-master-credentials"
clusterStatusScalingCompute = "scaling-compute"
clusterStatusUpgrading = "upgrading"

// Non-standard status values.
clusterStatusAvailableWithPendingModifiedValues = "tf-available-with-pending-modified-values"
46 changes: 22 additions & 24 deletions internal/service/rds/event_categories_data_source.go
@@ -5,19 +5,23 @@ package rds

import (
"context"
"slices"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/rds"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/rds"
"github.com/aws/aws-sdk-go-v2/service/rds/types"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/hashicorp/terraform-provider-aws/internal/conns"
"github.com/hashicorp/terraform-provider-aws/internal/enum"
"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
"github.com/hashicorp/terraform-provider-aws/names"
)

// @SDKDataSource("aws_db_event_categories")
func DataSourceEventCategories() *schema.Resource {
// @SDKDataSource("aws_db_event_categories", name="Event Categories")
func dataSourceEventCategories() *schema.Resource {
return &schema.Resource{
ReadWithoutTimeout: dataSourceEventCategoriesRead,

@@ -28,17 +32,17 @@ func DataSourceEventCategories() *schema.Resource {
Elem: &schema.Schema{Type: schema.TypeString},
},
names.AttrSourceType: {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice(rds.SourceType_Values(), false),
Type: schema.TypeString,
Optional: true,
ValidateDiagFunc: enum.Validate[types.SourceType](),
},
},
}
}

func dataSourceEventCategoriesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
var diags diag.Diagnostics
conn := meta.(*conns.AWSClient).RDSConn(ctx)
conn := meta.(*conns.AWSClient).RDSClient(ctx)

input := &rds.DescribeEventCategoriesInput{}

@@ -47,35 +51,29 @@ func dataSourceEventCategoriesRead(ctx context.Context, d *schema.ResourceData,
}

output, err := findEventCategoriesMaps(ctx, conn, input)

if err != nil {
return sdkdiag.AppendErrorf(diags, "reading RDS Event Categories: %s", err)
}

var eventCategories []string

for _, v := range output {
eventCategories = append(eventCategories, aws.StringValueSlice(v.EventCategories)...)
}

d.SetId(meta.(*conns.AWSClient).Region)
d.Set("event_categories", eventCategories)
d.Set("event_categories", slices.Concat(tfslices.ApplyToAll(output, func(v types.EventCategoriesMap) []string {
return v.EventCategories
})...))

return diags
}

func findEventCategoriesMaps(ctx context.Context, conn *rds.RDS, input *rds.DescribeEventCategoriesInput) ([]*rds.EventCategoriesMap, error) {
var output []*rds.EventCategoriesMap
func findEventCategoriesMaps(ctx context.Context, conn *rds.Client, input *rds.DescribeEventCategoriesInput) ([]types.EventCategoriesMap, error) {
output, err := conn.DescribeEventCategories(ctx, input)

page, err := conn.DescribeEventCategoriesWithContext(ctx, input)
if err != nil {
return nil, err
}

for _, v := range page.EventCategoriesMapList {
if v != nil {
output = append(output, v)
}
if output == nil {
return nil, tfresource.NewEmptyResultError(input)
}

return output, nil
return output.EventCategoriesMapList, nil
}
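
For reference, a hypothetical configuration using the `aws_db_event_categories` data source refactored above; the resource and attribute names come from the schema shown, while the `db-cluster` value for `source_type` is an assumption about the accepted source types.

```hcl
# Hypothetical lookup of the RDS event categories that apply to DB clusters.
data "aws_db_event_categories" "example" {
  source_type = "db-cluster"
}

output "db_cluster_event_categories" {
  value = data.aws_db_event_categories.example.event_categories
}
```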