From c60c0bce32529f951f46dbb21d35cc544d48f4df Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Wed, 18 May 2022 18:17:20 +0300 Subject: [PATCH 1/9] aqua --- internal/service/redshift/cluster.go | 268 ++++++++++-------- internal/service/redshift/cluster_test.go | 75 +++++ internal/service/redshift/status.go | 32 +++ internal/service/redshift/wait.go | 40 +++ website/docs/r/redshift_cluster.html.markdown | 8 +- 5 files changed, 297 insertions(+), 126 deletions(-) diff --git a/internal/service/redshift/cluster.go b/internal/service/redshift/cluster.go index 1c910d705e2c..94a1fb96b821 100644 --- a/internal/service/redshift/cluster.go +++ b/internal/service/redshift/cluster.go @@ -49,6 +49,17 @@ func ResourceCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "apply_immediately": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "aqua_configuration_status": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(redshift.AquaConfigurationStatus_Values(), false), + }, "automated_snapshot_retention_period": { Type: schema.TypeInt, Optional: true, @@ -180,8 +191,10 @@ func ResourceCluster() *schema.Resource { Type: schema.TypeSet, Optional: true, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidARN, + }, }, "kms_key_id": { Type: schema.TypeString, @@ -245,13 +258,15 @@ func ResourceCluster() *schema.Resource { Default: 1, }, "owner_account": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidAccountID, }, "port": { - Type: schema.TypeInt, - Optional: true, - Default: 5439, + Type: schema.TypeInt, + Optional: true, + Default: 5439, + ValidateFunc: validation.IntBetween(1115, 65535), }, "preferred_maintenance_window": { Type: schema.TypeString, @@ -348,76 +363,107 @@ func resourceClusterCreate(d 
*schema.ResourceData, meta interface{}) error { defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig tags := defaultTagsConfig.MergeTags(tftags.New(d.Get("tags").(map[string]interface{}))) - if v, ok := d.GetOk("snapshot_identifier"); ok { - clusterID := d.Get("cluster_identifier").(string) - input := &redshift.RestoreFromClusterSnapshotInput{ - AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)), - AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))), - ClusterIdentifier: aws.String(clusterID), - Port: aws.Int64(int64(d.Get("port").(int))), - NodeType: aws.String(d.Get("node_type").(string)), - PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), - SnapshotIdentifier: aws.String(v.(string)), - } + clusterID := d.Get("cluster_identifier").(string) + backupInput := &redshift.RestoreFromClusterSnapshotInput{ + AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)), + AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))), + ClusterIdentifier: aws.String(clusterID), + Port: aws.Int64(int64(d.Get("port").(int))), + NodeType: aws.String(d.Get("node_type").(string)), + PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), + } - if v, ok := d.GetOk("availability_zone"); ok { - input.AvailabilityZone = aws.String(v.(string)) - } + input := &redshift.CreateClusterInput{ + AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)), + AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))), + ClusterIdentifier: aws.String(clusterID), + ClusterVersion: aws.String(d.Get("cluster_version").(string)), + DBName: aws.String(d.Get("database_name").(string)), + MasterUsername: aws.String(d.Get("master_username").(string)), + MasterUserPassword: aws.String(d.Get("master_password").(string)), + NodeType: aws.String(d.Get("node_type").(string)), + Port: 
aws.Int64(int64(d.Get("port").(int))), + PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), + Tags: Tags(tags.IgnoreAWS()), + } - if v, ok := d.GetOk("availability_zone_relocation_enabled"); ok { - input.AvailabilityZoneRelocation = aws.Bool(v.(bool)) - } + if v, ok := d.GetOk("aqua_configuration_status"); ok { + backupInput.AquaConfigurationStatus = aws.String(v.(string)) + input.AquaConfigurationStatus = aws.String(v.(string)) + } - if v, ok := d.GetOk("cluster_subnet_group_name"); ok { - input.ClusterSubnetGroupName = aws.String(v.(string)) - } + if v, ok := d.GetOk("availability_zone"); ok { + backupInput.AvailabilityZone = aws.String(v.(string)) + input.AvailabilityZone = aws.String(v.(string)) + } - if v, ok := d.GetOk("cluster_parameter_group_name"); ok { - input.ClusterParameterGroupName = aws.String(v.(string)) - } + if v, ok := d.GetOk("availability_zone_relocation_enabled"); ok { + backupInput.AvailabilityZoneRelocation = aws.Bool(v.(bool)) + input.AvailabilityZoneRelocation = aws.Bool(v.(bool)) + } - if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 { - input.ClusterSecurityGroups = flex.ExpandStringSet(v) - } + if v, ok := d.GetOk("cluster_subnet_group_name"); ok { + backupInput.ClusterSubnetGroupName = aws.String(v.(string)) + input.ClusterSubnetGroupName = aws.String(v.(string)) + } - if v, ok := d.GetOk("elastic_ip"); ok { - input.ElasticIp = aws.String(v.(string)) - } + if v, ok := d.GetOk("cluster_parameter_group_name"); ok { + backupInput.ClusterParameterGroupName = aws.String(v.(string)) + input.ClusterParameterGroupName = aws.String(v.(string)) + } - if v, ok := d.GetOk("enhanced_vpc_routing"); ok { - input.EnhancedVpcRouting = aws.Bool(v.(bool)) - } + if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 { + backupInput.ClusterSecurityGroups = flex.ExpandStringSet(v) + input.ClusterSecurityGroups = flex.ExpandStringSet(v) + } - if v, ok := d.GetOk("iam_roles"); ok { - input.IamRoles = 
flex.ExpandStringSet(v.(*schema.Set)) - } + if v, ok := d.GetOk("elastic_ip"); ok { + backupInput.ElasticIp = aws.String(v.(string)) + input.ElasticIp = aws.String(v.(string)) + } - if v, ok := d.GetOk("kms_key_id"); ok { - input.KmsKeyId = aws.String(v.(string)) - } + if v, ok := d.GetOk("enhanced_vpc_routing"); ok { + backupInput.EnhancedVpcRouting = aws.Bool(v.(bool)) + input.EnhancedVpcRouting = aws.Bool(v.(bool)) + } - if v, ok := d.GetOk("number_of_nodes"); ok { - input.NumberOfNodes = aws.Int64(int64(v.(int))) - } + if v, ok := d.GetOk("iam_roles"); ok { + backupInput.IamRoles = flex.ExpandStringSet(v.(*schema.Set)) + input.IamRoles = flex.ExpandStringSet(v.(*schema.Set)) + } - if v, ok := d.GetOk("owner_account"); ok { - input.OwnerAccount = aws.String(v.(string)) - } + if v, ok := d.GetOk("kms_key_id"); ok { + backupInput.KmsKeyId = aws.String(v.(string)) + input.KmsKeyId = aws.String(v.(string)) + } - if v, ok := d.GetOk("preferred_maintenance_window"); ok { - input.PreferredMaintenanceWindow = aws.String(v.(string)) - } + if v, ok := d.GetOk("number_of_nodes"); ok { + backupInput.NumberOfNodes = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("preferred_maintenance_window"); ok { + backupInput.PreferredMaintenanceWindow = aws.String(v.(string)) + input.PreferredMaintenanceWindow = aws.String(v.(string)) + } + + if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { + backupInput.VpcSecurityGroupIds = flex.ExpandStringSet(v) + input.VpcSecurityGroupIds = flex.ExpandStringSet(v) + } + + if v, ok := d.GetOk("snapshot_identifier"); ok { + backupInput.SnapshotIdentifier = aws.String(v.(string)) if v, ok := d.GetOk("snapshot_cluster_identifier"); ok { - input.SnapshotClusterIdentifier = aws.String(v.(string)) + backupInput.SnapshotClusterIdentifier = aws.String(v.(string)) } - if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { - input.VpcSecurityGroupIds = flex.ExpandStringSet(v) + if v, ok := d.GetOk("owner_account"); ok 
{ + backupInput.OwnerAccount = aws.String(v.(string)) } - log.Printf("[DEBUG] Restoring Redshift Cluster: %s", input) - output, err := conn.RestoreFromClusterSnapshot(input) + log.Printf("[DEBUG] Restoring Redshift Cluster: %s", backupInput) + output, err := conn.RestoreFromClusterSnapshot(backupInput) if err != nil { return fmt.Errorf("error restoring Redshift Cluster (%s) from snapshot: %w", clusterID, err) @@ -433,61 +479,6 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf(`provider.aws: aws_redshift_cluster: %s: "master_username": required field is not set`, d.Get("cluster_identifier").(string)) } - clusterID := d.Get("cluster_identifier").(string) - input := &redshift.CreateClusterInput{ - AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)), - AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))), - ClusterIdentifier: aws.String(clusterID), - ClusterVersion: aws.String(d.Get("cluster_version").(string)), - DBName: aws.String(d.Get("database_name").(string)), - MasterUsername: aws.String(d.Get("master_username").(string)), - MasterUserPassword: aws.String(d.Get("master_password").(string)), - NodeType: aws.String(d.Get("node_type").(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), - Tags: Tags(tags.IgnoreAWS()), - } - - if v, ok := d.GetOk("availability_zone"); ok { - input.AvailabilityZone = aws.String(v.(string)) - } - - if v, ok := d.GetOk("availability_zone_relocation_enabled"); ok { - input.AvailabilityZoneRelocation = aws.Bool(v.(bool)) - } - - if v, ok := d.GetOk("cluster_parameter_group_name"); ok { - input.ClusterParameterGroupName = aws.String(v.(string)) - } - - if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 { - input.ClusterSecurityGroups = flex.ExpandStringSet(v) - } - - if v, ok := d.GetOk("cluster_subnet_group_name"); ok { - 
input.ClusterSubnetGroupName = aws.String(v.(string)) - } - - if v, ok := d.GetOk("elastic_ip"); ok { - input.ElasticIp = aws.String(v.(string)) - } - - if v, ok := d.GetOk("encrypted"); ok { - input.Encrypted = aws.Bool(v.(bool)) - } - - if v, ok := d.GetOk("enhanced_vpc_routing"); ok { - input.EnhancedVpcRouting = aws.Bool(v.(bool)) - } - - if v, ok := d.GetOk("iam_roles"); ok { - input.IamRoles = flex.ExpandStringSet(v.(*schema.Set)) - } - - if v, ok := d.GetOk("kms_key_id"); ok { - input.KmsKeyId = aws.String(v.(string)) - } - if v := d.Get("number_of_nodes").(int); v > 1 { input.ClusterType = aws.String(clusterTypeMultiNode) input.NumberOfNodes = aws.Int64(int64(d.Get("number_of_nodes").(int))) @@ -495,14 +486,6 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { input.ClusterType = aws.String(clusterTypeSingleNode) } - if v, ok := d.GetOk("preferred_maintenance_window"); ok { - input.PreferredMaintenanceWindow = aws.String(v.(string)) - } - - if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { - input.VpcSecurityGroupIds = flex.ExpandStringSet(v) - } - log.Printf("[DEBUG] Creating Redshift Cluster: %s", input) output, err := conn.CreateCluster(input) @@ -576,6 +559,9 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { Resource: fmt.Sprintf("cluster:%s", d.Id()), }.String() d.Set("arn", arn) + if rsc.AquaConfiguration != nil { + d.Set("aqua_configuration_status", rsc.AquaConfiguration.AquaConfigurationStatus) + } d.Set("automated_snapshot_retention_period", rsc.AutomatedSnapshotRetentionPeriod) d.Set("availability_zone", rsc.AvailabilityZone) azr, err := clusterAvailabilityZoneRelocationStatus(rsc) @@ -666,7 +652,7 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).RedshiftConn - if d.HasChangesExcept("availability_zone", "iam_roles", "logging", 
"snapshot_copy", "tags", "tags_all") { + if d.HasChangesExcept("aqua_configuration_status", "availability_zone", "iam_roles", "logging", "snapshot_copy", "tags", "tags_all") { input := &redshift.ModifyClusterInput{ ClusterIdentifier: aws.String(d.Id()), } @@ -784,6 +770,42 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("aqua_configuration_status") { + input := &redshift.ModifyAquaConfigurationInput{ + AquaConfigurationStatus: aws.String(d.Get("aqua_configuration_status").(string)), + ClusterIdentifier: aws.String(d.Id()), + } + + log.Printf("[DEBUG] Modifying Redshift Cluster Aqua Configuration: %s", input) + _, err := conn.ModifyAquaConfiguration(input) + + if err != nil { + return fmt.Errorf("error modifying Redshift Cluster (%s) Aqua Configuration: %w", d.Id(), err) + } + + if d.Get("apply_immediately").(bool) { + rebootInput := &redshift.RebootClusterInput{ + ClusterIdentifier: aws.String(d.Id()), + } + + _, err = verify.RetryOnAWSCode(redshift.ErrCodeInvalidClusterStateFault, func() (interface{}, error) { + return conn.RebootCluster(rebootInput) + }) + + if err != nil { + return fmt.Errorf("error rebooting Redshift Cluster (%s): %w", d.Id(), err) + } + + if _, err := waitClusterRebooted(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return fmt.Errorf("error waiting for Redshift Cluster (%s) Rebooted: %w", d.Id(), err) + } + + if _, err := waitClusterAquaApplied(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return fmt.Errorf("error waiting for Redshift Cluster (%s) Aqua Configuration update: %w", d.Id(), err) + } + } + } + // Availability Zone cannot be changed at the same time as other settings if d.HasChange("availability_zone") { input := &redshift.ModifyClusterInput{ diff --git a/internal/service/redshift/cluster_test.go b/internal/service/redshift/cluster_test.go index 762ea92843e0..6bf6a0bdafb0 100644 --- a/internal/service/redshift/cluster_test.go +++ 
b/internal/service/redshift/cluster_test.go @@ -40,6 +40,8 @@ func TestAccRedshiftCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "publicly_accessible", "true"), resource.TestMatchResourceAttr(resourceName, "dns_name", regexp.MustCompile(fmt.Sprintf("^%s.*\\.redshift\\..*", rName))), resource.TestCheckResourceAttr(resourceName, "availability_zone_relocation_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "aqua_configuration_status", "auto"), + resource.TestCheckResourceAttr(resourceName, "tags.#", "0"), ), }, { @@ -50,12 +52,60 @@ func TestAccRedshiftCluster_basic(t *testing.T) { "final_snapshot_identifier", "master_password", "skip_final_snapshot", + "apply_immediately", }, }, }, }) } +func TestAccRedshiftCluster_aqua(t *testing.T) { + var v redshift.Cluster + resourceName := "aws_redshift_cluster.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, redshift.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_aqua(rName, "enabled"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "aqua_configuration_status", "enabled"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_snapshot_identifier", + "master_password", + "skip_final_snapshot", + "apply_immediately", + }, + }, + { + Config: testAccClusterConfig_aqua(rName, "disabled"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "aqua_configuration_status", "disabled"), + ), + }, + { + Config: testAccClusterConfig_aqua(rName, "enabled"), + Check: resource.ComposeTestCheckFunc( 
+ testAccCheckClusterExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "aqua_configuration_status", "enabled"), + ), + }, + }, + }) +} + func TestAccRedshiftCluster_disappears(t *testing.T) { var v redshift.Cluster resourceName := "aws_redshift_cluster.test" @@ -104,6 +154,7 @@ func TestAccRedshiftCluster_withFinalSnapshot(t *testing.T) { "final_snapshot_identifier", "master_password", "skip_final_snapshot", + "apply_immediately", }, }, }, @@ -139,6 +190,7 @@ func TestAccRedshiftCluster_kmsKey(t *testing.T) { "final_snapshot_identifier", "master_password", "skip_final_snapshot", + "apply_immediately", }, }, }, @@ -171,6 +223,7 @@ func TestAccRedshiftCluster_enhancedVPCRoutingEnabled(t *testing.T) { "final_snapshot_identifier", "master_password", "skip_final_snapshot", + "apply_immediately", }, }, { @@ -212,6 +265,7 @@ func TestAccRedshiftCluster_loggingEnabled(t *testing.T) { "final_snapshot_identifier", "master_password", "skip_final_snapshot", + "apply_immediately", }, }, { @@ -405,6 +459,7 @@ func TestAccRedshiftCluster_tags(t *testing.T) { "final_snapshot_identifier", "master_password", "skip_final_snapshot", + "apply_immediately", }, }, { @@ -639,6 +694,7 @@ func TestAccRedshiftCluster_availabilityZoneRelocation(t *testing.T) { "final_snapshot_identifier", "master_password", "skip_final_snapshot", + "apply_immediately", }, }, { @@ -711,6 +767,7 @@ func TestAccRedshiftCluster_restoreFromSnapshot(t *testing.T) { "master_password", "skip_final_snapshot", "snapshot_identifier", + "apply_immediately", }, }, }, @@ -892,6 +949,24 @@ resource "aws_redshift_cluster" "test" { `, rName)) } +func testAccClusterConfig_aqua(rName, status string) string { + return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptInExclude("usw2-az2"), fmt.Sprintf(` +resource "aws_redshift_cluster" "test" { + cluster_identifier = %[1]q + availability_zone = data.aws_availability_zones.available.names[0] + database_name = "mydb" + master_username = "foo_test" + 
master_password = "Mustbe8characters" + node_type = "ra3.xlplus" + automated_snapshot_retention_period = 1 + allow_version_upgrade = false + skip_final_snapshot = true + aqua_configuration_status = %[2]q + apply_immediately = true +} +`, rName, status)) +} + func testAccClusterConfig_encrypted(rName string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptInExclude("usw2-az2"), fmt.Sprintf(` resource "aws_kms_key" "test" { diff --git a/internal/service/redshift/status.go b/internal/service/redshift/status.go index 0d58add3948c..aca5457a7ba3 100644 --- a/internal/service/redshift/status.go +++ b/internal/service/redshift/status.go @@ -38,3 +38,35 @@ func statusClusterAvailabilityZoneRelocation(conn *redshift.Redshift, id string) return output, aws.StringValue(output.AvailabilityZoneRelocationStatus), nil } } + +func statusCluster(conn *redshift.Redshift, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindClusterByID(conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.ClusterStatus), nil + } +} + +func statusClusterAqua(conn *redshift.Redshift, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindClusterByID(conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.AquaConfiguration.AquaStatus), nil + } +} diff --git a/internal/service/redshift/wait.go b/internal/service/redshift/wait.go index 698b7ee34d0f..3245ad07272e 100644 --- a/internal/service/redshift/wait.go +++ b/internal/service/redshift/wait.go @@ -90,3 +90,43 @@ func waitClusterRelocationStatusResolved(conn *redshift.Redshift, id string) (*r return nil, err } + +func waitClusterRebooted(conn *redshift.Redshift, id string, timeout time.Duration) (*redshift.Cluster, error) { + 
stateConf := &resource.StateChangeConf{ + Pending: []string{clusterStatusRebooting}, + Target: []string{clusterStatusAvailable}, + Refresh: statusCluster(conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*redshift.Cluster); ok { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.ClusterStatus))) + + return output, err + } + + return nil, err +} + +func waitClusterAquaApplied(conn *redshift.Redshift, id string, timeout time.Duration) (*redshift.Cluster, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{redshift.AquaStatusApplying}, + Target: []string{redshift.AquaStatusDisabled, redshift.AquaStatusEnabled}, + Refresh: statusClusterAqua(conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*redshift.Cluster); ok { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.ClusterStatus))) + + return output, err + } + + return nil, err +} diff --git a/website/docs/r/redshift_cluster.html.markdown b/website/docs/r/redshift_cluster.html.markdown index c0a1cabebc6d..e1cf49878860 100644 --- a/website/docs/r/redshift_cluster.html.markdown +++ b/website/docs/r/redshift_cluster.html.markdown @@ -51,13 +51,15 @@ The following arguments are supported: Format: ddd:hh24:mi-ddd:hh24:mi * `cluster_parameter_group_name` - (Optional) The name of the parameter group to be associated with this cluster. * `automated_snapshot_retention_period` - (Optional) The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with create-cluster-snapshot. Default is 1. -* `port` - (Optional) The port number on which the cluster accepts incoming connections. 
+* `port` - (Optional) The port number on which the cluster accepts incoming connections. Valid values are between `1115` and `65535`. The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections. - Default port is 5439. + Default port is `5439`. * `cluster_version` - (Optional) The version of the Amazon Redshift engine software that you want to deploy on the cluster. The version selected runs on all the nodes in the cluster. -* `allow_version_upgrade` - (Optional) If true , major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster. Default is true +* `allow_version_upgrade` - (Optional) If true, major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster. Default is `true`. +* `apply_immediately` - (Optional) Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is `false`. +* `aqua_configuration_status` - (Optional) The value represents how the cluster is configured to use AQUA (Advanced Query Accelerator) after the cluster is restored. Possible values are `enabled`, `disabled`, and `auto`. Requires a cluster reboot. * `number_of_nodes` - (Optional) The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. Default is 1. * `publicly_accessible` - (Optional) If true, the cluster can be accessed from a public network. Default is `true`. * `encrypted` - (Optional) If true , the data in the cluster is encrypted at rest.
From 4a283c8fd7932206a42180fcda89cbd2d3bd8bf6 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Wed, 18 May 2022 18:21:19 +0300 Subject: [PATCH 2/9] changelog --- .changelog/24856.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changelog/24856.txt diff --git a/.changelog/24856.txt b/.changelog/24856.txt new file mode 100644 index 000000000000..e86ee51e954b --- /dev/null +++ b/.changelog/24856.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_redshift_cluster: Add plan-time validation for `iam_roles`, `owner_account`, and `port`. +``` + +```release-note:enhancement +resource/aws_redshift_cluster: Add `aqua_configuration_status` and `apply_immediately` arguments. +``` \ No newline at end of file From 5321737037af82a89aaeed187baf7357174abdb4 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 19 May 2022 10:02:19 +0300 Subject: [PATCH 3/9] retry --- internal/service/redshift/cluster.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/internal/service/redshift/cluster.go b/internal/service/redshift/cluster.go index 94a1fb96b821..59609abcc0a6 100644 --- a/internal/service/redshift/cluster.go +++ b/internal/service/redshift/cluster.go @@ -788,9 +788,13 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { ClusterIdentifier: aws.String(d.Id()), } - _, err = verify.RetryOnAWSCode(redshift.ErrCodeInvalidClusterStateFault, func() (interface{}, error) { - return conn.RebootCluster(rebootInput) - }) + _, err := tfresource.RetryWhenAWSErrCodeEquals( + clusterInvalidClusterStateFaultTimeout, + func() (interface{}, error) { + return conn.RebootCluster(rebootInput) + }, + redshift.ErrCodeInvalidClusterStateFault, + ) if err != nil { return fmt.Errorf("error rebooting Redshift Cluster (%s): %w", d.Id(), err) From bcd8e4b26bd5062a674834220afdc633bb6fe185 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 19 May 2022 10:44:14 +0300 Subject: [PATCH 4/9] retry --- internal/service/redshift/wait.go | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/redshift/wait.go b/internal/service/redshift/wait.go index 3245ad07272e..26fb5f01c851 100644 --- a/internal/service/redshift/wait.go +++ b/internal/service/redshift/wait.go @@ -93,7 +93,7 @@ func waitClusterRelocationStatusResolved(conn *redshift.Redshift, id string) (*r func waitClusterRebooted(conn *redshift.Redshift, id string, timeout time.Duration) (*redshift.Cluster, error) { stateConf := &resource.StateChangeConf{ - Pending: []string{clusterStatusRebooting}, + Pending: []string{clusterStatusRebooting, clusterStatusModifying}, Target: []string{clusterStatusAvailable}, Refresh: statusCluster(conn, id), Timeout: timeout, From f880aa1bc08c51776726b5ea94e78bdec70419a5 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 19 May 2022 12:07:55 +0300 Subject: [PATCH 5/9] datasource --- internal/service/redshift/cluster_data_source.go | 8 +++++++- website/docs/d/redshift_cluster.html.markdown | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/internal/service/redshift/cluster_data_source.go b/internal/service/redshift/cluster_data_source.go index 15db32922bf6..92ecbdcc4d55 100644 --- a/internal/service/redshift/cluster_data_source.go +++ b/internal/service/redshift/cluster_data_source.go @@ -31,7 +31,10 @@ func DataSourceCluster() *schema.Resource { Type: schema.TypeInt, Computed: true, }, - + "aqua_configuration_status": { + Type: schema.TypeString, + Computed: true, + }, "availability_zone": { Type: schema.TypeString, Computed: true, @@ -200,6 +203,9 @@ func dataSourceClusterRead(d *schema.ResourceData, meta interface{}) error { d.SetId(cluster) d.Set("allow_version_upgrade", rsc.AllowVersionUpgrade) d.Set("automated_snapshot_retention_period", rsc.AutomatedSnapshotRetentionPeriod) + if rsc.AquaConfiguration != nil { + d.Set("aqua_configuration_status", rsc.AquaConfiguration.AquaConfigurationStatus) + } d.Set("availability_zone", rsc.AvailabilityZone) azr, err := 
clusterAvailabilityZoneRelocationStatus(rsc) if err != nil { diff --git a/website/docs/d/redshift_cluster.html.markdown b/website/docs/d/redshift_cluster.html.markdown index ffd98b3de546..5064f771958e 100644 --- a/website/docs/d/redshift_cluster.html.markdown +++ b/website/docs/d/redshift_cluster.html.markdown @@ -53,6 +53,7 @@ In addition to all arguments above, the following attributes are exported: * `allow_version_upgrade` - Whether major version upgrades can be applied during maintenance period * `automated_snapshot_retention_period` - The backup retention period +* `aqua_configuration_status` - The value represents how the cluster is configured to use AQUA. * `availability_zone` - The availability zone of the cluster * `availability_zone_relocation_enabled` - Indicates whether the cluster is able to be relocated to another availability zone. * `bucket_name` - The name of the S3 bucket where the log files are to be stored From b5cec2effc18d7a3bed7488f8c9fde0e3132807d Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 19 May 2022 12:08:27 +0300 Subject: [PATCH 6/9] changelog --- .changelog/24856.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.changelog/24856.txt b/.changelog/24856.txt index e86ee51e954b..0e4291d5978c 100644 --- a/.changelog/24856.txt +++ b/.changelog/24856.txt @@ -4,4 +4,8 @@ resource/aws_redshift_cluster: Add plan-time validation for `iam_roles`, `owner_ ```release-note:enhancement resource/aws_redshift_cluster: Add `aqua_configuration_status` and `apply_immediately` arguments. +``` + +```release-note:enhancement +data-source/aws_redshift_cluster: Add `aqua_configuration_status` attribute. ``` \ No newline at end of file From 155ad393aea5e5218e3131996db8aea1c066d57a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 23 May 2022 13:45:34 -0400 Subject: [PATCH 7/9] r/aws_redshift_cluster: Alphabetize attributes. 
--- internal/service/redshift/cluster.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/internal/service/redshift/cluster.go b/internal/service/redshift/cluster.go index 59609abcc0a6..1296e8190c9b 100644 --- a/internal/service/redshift/cluster.go +++ b/internal/service/redshift/cluster.go @@ -45,10 +45,6 @@ func ResourceCluster() *schema.Resource { Optional: true, Default: true, }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, "apply_immediately": { Type: schema.TypeBool, Optional: true, @@ -60,6 +56,10 @@ func ResourceCluster() *schema.Resource { Computed: true, ValidateFunc: validation.StringInSlice(redshift.AquaConfigurationStatus_Values(), false), }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, "automated_snapshot_retention_period": { Type: schema.TypeInt, Optional: true, @@ -126,7 +126,6 @@ func ResourceCluster() *schema.Resource { Optional: true, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, "cluster_subnet_group_name": { Type: schema.TypeString, @@ -329,7 +328,6 @@ func ResourceCluster() *schema.Resource { Optional: true, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, }, From fabc63da13b5f9177603b029e0c5ccb96d21d597 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 23 May 2022 13:47:29 -0400 Subject: [PATCH 8/9] d/aws_redshift_cluster: Alphabetize attributes. 
--- .../service/redshift/cluster_data_source.go | 46 ++++--------------- 1 file changed, 8 insertions(+), 38 deletions(-) diff --git a/internal/service/redshift/cluster_data_source.go b/internal/service/redshift/cluster_data_source.go index 92ecbdcc4d55..2d0a741e7ba9 100644 --- a/internal/service/redshift/cluster_data_source.go +++ b/internal/service/redshift/cluster_data_source.go @@ -16,159 +16,129 @@ func DataSourceCluster() *schema.Resource { Read: dataSourceClusterRead, Schema: map[string]*schema.Schema{ - - "cluster_identifier": { - Type: schema.TypeString, - Required: true, - }, - "allow_version_upgrade": { Type: schema.TypeBool, Computed: true, }, - - "automated_snapshot_retention_period": { - Type: schema.TypeInt, - Computed: true, - }, "aqua_configuration_status": { Type: schema.TypeString, Computed: true, }, + "automated_snapshot_retention_period": { + Type: schema.TypeInt, + Computed: true, + }, "availability_zone": { Type: schema.TypeString, Computed: true, }, - "availability_zone_relocation_enabled": { Type: schema.TypeBool, Computed: true, }, - "bucket_name": { Type: schema.TypeString, Computed: true, }, - + "cluster_identifier": { + Type: schema.TypeString, + Required: true, + }, "cluster_parameter_group_name": { Type: schema.TypeString, Computed: true, }, - "cluster_public_key": { Type: schema.TypeString, Computed: true, }, - "cluster_revision_number": { Type: schema.TypeString, Computed: true, }, - "cluster_security_groups": { Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "cluster_subnet_group_name": { Type: schema.TypeString, Computed: true, }, - "cluster_type": { Type: schema.TypeString, Computed: true, }, - "cluster_version": { Type: schema.TypeString, Computed: true, }, - "database_name": { Type: schema.TypeString, Computed: true, }, - "elastic_ip": { Type: schema.TypeString, Computed: true, }, - "enable_logging": { Type: schema.TypeBool, Computed: true, }, - "encrypted": { Type: schema.TypeBool, 
Computed: true, }, - "endpoint": { Type: schema.TypeString, Computed: true, }, - "enhanced_vpc_routing": { Type: schema.TypeBool, Computed: true, }, - "iam_roles": { Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "kms_key_id": { Type: schema.TypeString, Computed: true, }, - "master_username": { Type: schema.TypeString, Computed: true, }, - "node_type": { Type: schema.TypeString, Computed: true, }, - "number_of_nodes": { Type: schema.TypeInt, Computed: true, }, - "port": { Type: schema.TypeInt, Computed: true, }, - "preferred_maintenance_window": { Type: schema.TypeString, Computed: true, }, - "publicly_accessible": { Type: schema.TypeBool, Computed: true, }, - "s3_key_prefix": { Type: schema.TypeString, Computed: true, }, - "tags": tftags.TagsSchema(), - "vpc_id": { Type: schema.TypeString, Computed: true, }, - "vpc_security_group_ids": { Type: schema.TypeList, Computed: true, From 18b0416d489a6e488773ee36ee74c8528eabc136 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 23 May 2022 13:54:22 -0400 Subject: [PATCH 9/9] r/aws_redshift_cluster: Don't forget to set Encrypted when creating the cluster. 
--- internal/service/redshift/cluster.go | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/internal/service/redshift/cluster.go b/internal/service/redshift/cluster.go index 1296e8190c9b..3598eda988a5 100644 --- a/internal/service/redshift/cluster.go +++ b/internal/service/redshift/cluster.go @@ -400,11 +400,6 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { input.AvailabilityZoneRelocation = aws.Bool(v.(bool)) } - if v, ok := d.GetOk("cluster_subnet_group_name"); ok { - backupInput.ClusterSubnetGroupName = aws.String(v.(string)) - input.ClusterSubnetGroupName = aws.String(v.(string)) - } - if v, ok := d.GetOk("cluster_parameter_group_name"); ok { backupInput.ClusterParameterGroupName = aws.String(v.(string)) input.ClusterParameterGroupName = aws.String(v.(string)) @@ -415,6 +410,11 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { input.ClusterSecurityGroups = flex.ExpandStringSet(v) } + if v, ok := d.GetOk("cluster_subnet_group_name"); ok { + backupInput.ClusterSubnetGroupName = aws.String(v.(string)) + input.ClusterSubnetGroupName = aws.String(v.(string)) + } + if v, ok := d.GetOk("elastic_ip"); ok { backupInput.ElasticIp = aws.String(v.(string)) input.ElasticIp = aws.String(v.(string)) @@ -437,6 +437,7 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { if v, ok := d.GetOk("number_of_nodes"); ok { backupInput.NumberOfNodes = aws.Int64(int64(v.(int))) + // NumberOfNodes set below for CreateCluster. 
} if v, ok := d.GetOk("preferred_maintenance_window"); ok { @@ -452,14 +453,14 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { if v, ok := d.GetOk("snapshot_identifier"); ok { backupInput.SnapshotIdentifier = aws.String(v.(string)) - if v, ok := d.GetOk("snapshot_cluster_identifier"); ok { - backupInput.SnapshotClusterIdentifier = aws.String(v.(string)) - } - if v, ok := d.GetOk("owner_account"); ok { backupInput.OwnerAccount = aws.String(v.(string)) } + if v, ok := d.GetOk("snapshot_cluster_identifier"); ok { + backupInput.SnapshotClusterIdentifier = aws.String(v.(string)) + } + log.Printf("[DEBUG] Restoring Redshift Cluster: %s", backupInput) output, err := conn.RestoreFromClusterSnapshot(backupInput) @@ -477,6 +478,10 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf(`provider.aws: aws_redshift_cluster: %s: "master_username": required field is not set`, d.Get("cluster_identifier").(string)) } + if v, ok := d.GetOk("encrypted"); ok { + input.Encrypted = aws.Bool(v.(bool)) + } + if v := d.Get("number_of_nodes").(int); v > 1 { input.ClusterType = aws.String(clusterTypeMultiNode) input.NumberOfNodes = aws.Int64(int64(d.Get("number_of_nodes").(int)))