package aws

import (
	"context"
	"fmt"
	"log"
	"regexp"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/timestreamwrite"
	"github.com/hashicorp/aws-sdk-go-base/tfawserr"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
)

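// resourceAwsTimestreamWriteTable defines the schema and CRUD handlers for the
// Timestream Write table resource. The resource ID is the composite
// "table_name:database_name" set on create and parsed on read, update, and delete.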
func resourceAwsTimestreamWriteTable() *schema.Resource {
	return &schema.Resource{
		CreateWithoutTimeout: resourceAwsTimestreamWriteTableCreate,
		ReadWithoutTimeout:   resourceAwsTimestreamWriteTableRead,
		UpdateWithoutTimeout: resourceAwsTimestreamWriteTableUpdate,
		DeleteWithoutTimeout: resourceAwsTimestreamWriteTableDelete,

		Importer: &schema.ResourceImporter{
			StateContext: schema.ImportStatePassthroughContext,
		},

		Schema: map[string]*schema.Schema{
			"arn": {
				Type:     schema.TypeString,
				Computed: true,
			},

			"database_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
				ValidateFunc: validation.All(
					validation.StringLenBetween(3, 64),
					validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`), "must only include alphanumeric, underscore, period, or hyphen characters"),
				),
			},

			"retention_properties": {
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"magnetic_store_retention_period_in_days": {
							Type:         schema.TypeInt,
							Required:     true,
							ValidateFunc: validation.IntBetween(1, 73000),
						},

						"memory_store_retention_period_in_hours": {
							Type:         schema.TypeInt,
							Required:     true,
							ValidateFunc: validation.IntBetween(1, 8766),
						},
					},
				},
			},

			"table_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
				ValidateFunc: validation.All(
					validation.StringLenBetween(3, 64),
					validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`), "must only include alphanumeric, underscore, period, or hyphen characters"),
				),
			},

			"tags": tagsSchema(),

			"tags_all": tagsSchemaComputed(),
		},

		CustomizeDiff: SetTagsDiff,
	}
}

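// resourceAwsTimestreamWriteTableCreate creates the Timestream table and sets the
// composite "table_name:database_name" resource ID before delegating to Read.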
func resourceAwsTimestreamWriteTableCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	conn := meta.(*AWSClient).timestreamwriteconn
	defaultTagsConfig := meta.(*AWSClient).DefaultTagsConfig
	tags := defaultTagsConfig.MergeTags(keyvaluetags.New(d.Get("tags").(map[string]interface{})))

	tableName := d.Get("table_name").(string)
	input := &timestreamwrite.CreateTableInput{
		DatabaseName: aws.String(d.Get("database_name").(string)),
		TableName:    aws.String(tableName),
	}

	if v, ok := d.GetOk("retention_properties"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
		input.RetentionProperties = expandTimestreamWriteRetentionProperties(v.([]interface{}))
	}

	if len(tags) > 0 {
		input.Tags = tags.IgnoreAws().TimestreamwriteTags()
	}

	output, err := conn.CreateTableWithContext(ctx, input)

	if err != nil {
		return diag.FromErr(fmt.Errorf("error creating Timestream Table (%s): %w", tableName, err))
	}

	if output == nil || output.Table == nil {
		return diag.FromErr(fmt.Errorf("error creating Timestream Table (%s): empty output", tableName))
	}

	d.SetId(fmt.Sprintf("%s:%s", aws.StringValue(output.Table.TableName), aws.StringValue(output.Table.DatabaseName)))

	return resourceAwsTimestreamWriteTableRead(ctx, d, meta)
}

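// resourceAwsTimestreamWriteTableRead refreshes state from DescribeTable, removing the
// resource from state when the table no longer exists, and lists its tags.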
func resourceAwsTimestreamWriteTableRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	conn := meta.(*AWSClient).timestreamwriteconn
	defaultTagsConfig := meta.(*AWSClient).DefaultTagsConfig
	ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig

	tableName, databaseName, err := resourceAwsTimestreamWriteTableParseId(d.Id())

	if err != nil {
		return diag.FromErr(err)
	}

	input := &timestreamwrite.DescribeTableInput{
		DatabaseName: aws.String(databaseName),
		TableName:    aws.String(tableName),
	}

	output, err := conn.DescribeTableWithContext(ctx, input)

	if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, timestreamwrite.ErrCodeResourceNotFoundException) {
		log.Printf("[WARN] Timestream Table %s not found, removing from state", d.Id())
		d.SetId("")
		return nil
	}

	if err != nil {
		return diag.FromErr(fmt.Errorf("error reading Timestream Table (%s): %w", d.Id(), err))
	}

	if output == nil || output.Table == nil {
		return diag.FromErr(fmt.Errorf("error reading Timestream Table (%s): empty output", d.Id()))
	}

	table := output.Table
	arn := aws.StringValue(table.Arn)

	d.Set("arn", arn)
	d.Set("database_name", table.DatabaseName)

	if err := d.Set("retention_properties", flattenTimestreamWriteRetentionProperties(table.RetentionProperties)); err != nil {
		return diag.FromErr(fmt.Errorf("error setting retention_properties: %w", err))
	}

	d.Set("table_name", table.TableName)

	tags, err := keyvaluetags.TimestreamwriteListTags(conn, arn)

	if err != nil {
		return diag.FromErr(fmt.Errorf("error listing tags for Timestream Table (%s): %w", arn, err))
	}

	tags = tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig)

	//lintignore:AWSR002
	if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil {
		return diag.FromErr(fmt.Errorf("error setting tags: %w", err))
	}

	if err := d.Set("tags_all", tags.Map()); err != nil {
		return diag.FromErr(fmt.Errorf("error setting tags_all: %w", err))
	}

	return nil
}

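// resourceAwsTimestreamWriteTableUpdate applies retention property and tag changes;
// database_name and table_name are ForceNew and are never updated in place.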
func resourceAwsTimestreamWriteTableUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	conn := meta.(*AWSClient).timestreamwriteconn

	if d.HasChange("retention_properties") {
		tableName, databaseName, err := resourceAwsTimestreamWriteTableParseId(d.Id())

		if err != nil {
			return diag.FromErr(err)
		}

		input := &timestreamwrite.UpdateTableInput{
			DatabaseName:        aws.String(databaseName),
			RetentionProperties: expandTimestreamWriteRetentionProperties(d.Get("retention_properties").([]interface{})),
			TableName:           aws.String(tableName),
		}

		_, err = conn.UpdateTableWithContext(ctx, input)

		if err != nil {
			return diag.FromErr(fmt.Errorf("error updating Timestream Table (%s): %w", d.Id(), err))
		}
	}

	if d.HasChange("tags_all") {
		o, n := d.GetChange("tags_all")

		if err := keyvaluetags.TimestreamwriteUpdateTags(conn, d.Get("arn").(string), o, n); err != nil {
			return diag.FromErr(fmt.Errorf("error updating Timestream Table (%s) tags: %w", d.Get("arn").(string), err))
		}
	}

	return resourceAwsTimestreamWriteTableRead(ctx, d, meta)
}

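// resourceAwsTimestreamWriteTableDelete deletes the table, treating a
// ResourceNotFoundException as a successful (already deleted) outcome.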
func resourceAwsTimestreamWriteTableDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	conn := meta.(*AWSClient).timestreamwriteconn

	tableName, databaseName, err := resourceAwsTimestreamWriteTableParseId(d.Id())

	if err != nil {
		return diag.FromErr(err)
	}

	input := &timestreamwrite.DeleteTableInput{
		DatabaseName: aws.String(databaseName),
		TableName:    aws.String(tableName),
	}

	_, err = conn.DeleteTableWithContext(ctx, input)

	if tfawserr.ErrCodeEquals(err, timestreamwrite.ErrCodeResourceNotFoundException) {
		return nil
	}

	if err != nil {
		return diag.FromErr(fmt.Errorf("error deleting Timestream Table (%s): %w", d.Id(), err))
	}

	return nil
}

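// expandTimestreamWriteRetentionProperties converts the single-element
// retention_properties configuration list into the SDK RetentionProperties type.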
func expandTimestreamWriteRetentionProperties(l []interface{}) *timestreamwrite.RetentionProperties {
	if len(l) == 0 || l[0] == nil {
		return nil
	}

	tfMap, ok := l[0].(map[string]interface{})

	if !ok {
		return nil
	}

	rp := &timestreamwrite.RetentionProperties{}

	if v, ok := tfMap["magnetic_store_retention_period_in_days"].(int); ok {
		rp.MagneticStoreRetentionPeriodInDays = aws.Int64(int64(v))
	}

	if v, ok := tfMap["memory_store_retention_period_in_hours"].(int); ok {
		rp.MemoryStoreRetentionPeriodInHours = aws.Int64(int64(v))
	}

	return rp
}

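// flattenTimestreamWriteRetentionProperties converts SDK RetentionProperties back into
// the single-element list stored in state.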
func flattenTimestreamWriteRetentionProperties(rp *timestreamwrite.RetentionProperties) []interface{} {
	if rp == nil {
		return []interface{}{}
	}

	m := map[string]interface{}{
		"magnetic_store_retention_period_in_days": aws.Int64Value(rp.MagneticStoreRetentionPeriodInDays),
		"memory_store_retention_period_in_hours":  aws.Int64Value(rp.MemoryStoreRetentionPeriodInHours),
	}

	return []interface{}{m}
}

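// resourceAwsTimestreamWriteTableParseId splits a composite resource ID into its table
// and database names, e.g. "mytable:mydb" yields ("mytable", "mydb", nil).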
func resourceAwsTimestreamWriteTableParseId(id string) (string, string, error) {
	idParts := strings.SplitN(id, ":", 2)
	if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" {
		return "", "", fmt.Errorf("unexpected format of ID (%s), expected table_name:database_name", id)
	}
	return idParts[0], idParts[1], nil
}