
Commit 9813089

Merge pull request #28220 from hashicorp/b-dms-endpoint-errors-region
dms/s3_endpoint: Different attributes by region
2 parents: a559c19 + 1d6f0e6

4 files changed: +30 -25 lines changed

.changelog/28220.txt (+3)

@@ -0,0 +1,3 @@
+```release-note:bug
+resource/aws_dms_s3_endpoint: Fix disparate handling of endpoint attributes in different regions
+```

internal/service/dms/s3_endpoint.go (+19 -16)

@@ -411,7 +411,6 @@ func resourceS3EndpointRead(d *schema.ResourceData, meta interface{}) error {

   s3settings := endpoint.S3Settings
   d.Set("add_column_name", s3settings.AddColumnName)
-  d.Set("add_trailing_padding_character", s3settings.AddTrailingPaddingCharacter)
   d.Set("bucket_folder", s3settings.BucketFolder)
   d.Set("bucket_name", s3settings.BucketName)
   d.Set("canned_acl_for_objects", s3settings.CannedAclForObjects)
@@ -420,36 +419,40 @@ func resourceS3EndpointRead(d *schema.ResourceData, meta interface{}) error {
   d.Set("cdc_max_batch_interval", s3settings.CdcMaxBatchInterval)
   d.Set("cdc_min_file_size", s3settings.CdcMinFileSize)
   d.Set("cdc_path", s3settings.CdcPath)
-  d.Set("compression_type", s3settings.CompressionType)
   d.Set("csv_delimiter", s3settings.CsvDelimiter)
-  d.Set("csv_no_sup_value", s3settings.CsvNoSupValue)
   d.Set("csv_null_value", s3settings.CsvNullValue)
   d.Set("csv_row_delimiter", s3settings.CsvRowDelimiter)
-  d.Set("data_format", s3settings.DataFormat)
   d.Set("data_page_size", s3settings.DataPageSize)
-  d.Set("date_partition_delimiter", strings.ToUpper(aws.StringValue(s3settings.DatePartitionDelimiter)))
-  d.Set("date_partition_enabled", s3settings.DatePartitionEnabled)
-  d.Set("date_partition_sequence", s3settings.DatePartitionSequence)
-  d.Set("date_partition_timezone", s3settings.DatePartitionTimezone)
   d.Set("dict_page_size_limit", s3settings.DictPageSizeLimit)
   d.Set("enable_statistics", s3settings.EnableStatistics)
   d.Set("encoding_type", s3settings.EncodingType)
-  d.Set("encryption_mode", s3settings.EncryptionMode)
   d.Set("expected_bucket_owner", s3settings.ExpectedBucketOwner)
   d.Set("ignore_header_rows", s3settings.IgnoreHeaderRows)
   d.Set("include_op_for_full_load", s3settings.IncludeOpForFullLoad)
   d.Set("max_file_size", s3settings.MaxFileSize)
-  d.Set("parquet_timestamp_in_millisecond", s3settings.ParquetTimestampInMillisecond)
-  d.Set("parquet_version", s3settings.ParquetVersion)
-  d.Set("preserve_transactions", s3settings.PreserveTransactions)
   d.Set("rfc_4180", s3settings.Rfc4180)
   d.Set("row_group_length", s3settings.RowGroupLength)
-  d.Set("server_side_encryption_kms_key_id", s3settings.ServerSideEncryptionKmsKeyId)
   d.Set("service_access_role_arn", s3settings.ServiceAccessRoleArn)
   d.Set("timestamp_column_name", s3settings.TimestampColumnName)
-  d.Set("use_csv_no_sup_value", s3settings.UseCsvNoSupValue)
   d.Set("use_task_start_time_for_full_load_timestamp", s3settings.UseTaskStartTimeForFullLoadTimestamp)

+  if d.Get("endpoint_type").(string) == dms.ReplicationEndpointTypeValueTarget {
+    d.Set("add_trailing_padding_character", s3settings.AddTrailingPaddingCharacter)
+    d.Set("compression_type", s3settings.CompressionType)
+    d.Set("csv_no_sup_value", s3settings.CsvNoSupValue)
+    d.Set("data_format", s3settings.DataFormat)
+    d.Set("date_partition_delimiter", strings.ToUpper(aws.StringValue(s3settings.DatePartitionDelimiter)))
+    d.Set("date_partition_enabled", s3settings.DatePartitionEnabled)
+    d.Set("date_partition_sequence", s3settings.DatePartitionSequence)
+    d.Set("date_partition_timezone", s3settings.DatePartitionTimezone)
+    d.Set("encryption_mode", s3settings.EncryptionMode)
+    d.Set("parquet_timestamp_in_millisecond", s3settings.ParquetTimestampInMillisecond)
+    d.Set("parquet_version", s3settings.ParquetVersion)
+    d.Set("preserve_transactions", s3settings.PreserveTransactions)
+    d.Set("server_side_encryption_kms_key_id", s3settings.ServerSideEncryptionKmsKeyId)
+    d.Set("use_csv_no_sup_value", s3settings.UseCsvNoSupValue)
+  }
+
   p, err := structure.NormalizeJsonString(aws.StringValue(s3settings.ExternalTableDefinition))
   if err != nil {
     return create.Error(names.DMS, create.ErrActionSetting, ResNameS3Endpoint, d.Id(), err)
@@ -610,7 +613,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings {
     s3s.CdcPath = aws.String(v.(string))
   }

-  if v, ok := d.GetOk("compression_type"); ok { // likely only useful for target
+  if v, ok := d.GetOk("compression_type"); ok && target { // likely only useful for target
     s3s.CompressionType = aws.String(v.(string))
   }

@@ -642,7 +645,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings {
     s3s.DatePartitionDelimiter = aws.String(v.(string))
   }

-  if v, ok := d.Get("date_partition_enabled").(bool); ok { // likely only useful for target
+  if v, ok := d.Get("date_partition_enabled").(bool); ok && target { // likely only useful for target
     s3s.DatePartitionEnabled = aws.Bool(v)
   }

internal/service/dms/s3_endpoint_test.go (+5 -6)

@@ -292,9 +292,10 @@ func TestAccDMSS3Endpoint_sourceSimple(t *testing.T) {
         PlanOnly:   true,
       },
       {
-        ResourceName:      resourceName,
-        ImportState:       true,
-        ImportStateVerify: true,
+        ResourceName:            resourceName,
+        ImportState:             true,
+        ImportStateVerify:       true,
+        ImportStateVerifyIgnore: []string{"compression_type", "date_partition_enabled", "parquet_timestamp_in_millisecond", "preserve_transactions", "use_csv_no_sup_value"},
       },
     },
   })
@@ -330,7 +331,7 @@ func TestAccDMSS3Endpoint_source(t *testing.T) {
           resource.TestCheckResourceAttr(resourceName, "cdc_inserts_and_updates", "true"),
           resource.TestCheckResourceAttr(resourceName, "cdc_max_batch_interval", "100"),
           resource.TestCheckResourceAttr(resourceName, "cdc_min_file_size", "16"),
-          resource.TestCheckResourceAttr(resourceName, "compression_type", "GZIP"),
+          resource.TestCheckResourceAttr(resourceName, "compression_type", "NONE"),
           resource.TestCheckResourceAttr(resourceName, "csv_null_value", "?"),
           resource.TestCheckResourceAttr(resourceName, "data_page_size", "1100000"),
           resource.TestCheckResourceAttr(resourceName, "date_partition_enabled", "true"),
@@ -683,7 +684,6 @@ resource "aws_dms_s3_endpoint" "test" {
   cdc_inserts_only       = false
   cdc_max_batch_interval = 100
   cdc_min_file_size      = 16
-  compression_type       = "GZIP"
   csv_null_value         = "?"
   data_page_size         = 1100000
   date_partition_enabled = true
@@ -740,7 +740,6 @@ resource "aws_dms_s3_endpoint" "test" {
   cdc_inserts_only       = true
   cdc_max_batch_interval = 101
   cdc_min_file_size      = 17
-  compression_type       = "NONE"
   csv_null_value         = "0"
   data_page_size         = 1000000
   date_partition_enabled = false

website/docs/r/dms_s3_endpoint.html.markdown (+3 -3)

@@ -112,15 +112,15 @@ The following arguments are optional:
 * `cdc_max_batch_interval` - (Optional) Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is `60`.)
 * `cdc_min_file_size` - (Optional) Minimum file size, defined in kilobytes, to reach for a file output. (AWS default is 32 MB.)
 * `certificate_arn` - (Optional, Default: empty string) ARN for the certificate.
-* `compression_type` - (Optional) Set to compress target files. Valid values are `GZIP` and `NONE`. Default is `NONE`.
+* `compression_type` - (Optional) Set to compress target files. Valid values are `GZIP` and `NONE`. Default is `NONE`. (Ignored for source endpoints.)
 * `csv_delimiter` - (Optional) Delimiter used to separate columns in the source files. Default is `,`.
 * `csv_no_sup_value` - (Optional) Only applies if output files for a CDC load are written in .csv format. If `use_csv_no_sup_value` is set to `true`, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of `use_csv_no_sup_value`. (Ignored for source endpoints.)
 * `csv_null_value` - (Optional) String to as null when writing to the target. (AWS default is `NULL`.)
 * `csv_row_delimiter` - (Optional) Delimiter used to separate rows in the source files. Default is newline (_i.e._, `\n`).
 * `data_format` - (Optional) Output format for the files that AWS DMS uses to create S3 objects. Valid values are `csv` and `parquet`. (Ignored for source endpoints -- only `csv` is valid.)
 * `data_page_size` - (Optional) Size of one data page in bytes. (AWS default is 1 MiB, _i.e._, `1048576`.)
 * `date_partition_delimiter` - (Optional) Date separating delimiter to use during folder partitioning. Valid values are `SLASH`, `UNDERSCORE`, `DASH`, and `NONE`. (AWS default is `SLASH`.) (Ignored for source endpoints.)
-* `date_partition_enabled` - (Optional) Partition S3 bucket folders based on transaction commit dates. Default is `false`.
+* `date_partition_enabled` - (Optional) Partition S3 bucket folders based on transaction commit dates. Default is `false`. (Ignored for source endpoints.)
 * `date_partition_sequence` - (Optional) Date format to use during folder partitioning. Use this parameter when `date_partition_enabled` is set to true. Valid values are `YYYYMMDD`, `YYYYMMDDHH`, `YYYYMM`, `MMYYYYDD`, and `DDMMYYYY`. (AWS default is `YYYYMMDD`.) (Ignored for source endpoints.)
 * `date_partition_timezone` - (Optional) Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (_e.g._, `Europe/Paris`). Use this when `date_partition_enabled` is `true`. (Ignored for source endpoints.)
 * `dict_page_size_limit` - (Optional) Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, _i.e._, `1048576`.)
@@ -137,7 +137,7 @@ The following arguments are optional:
 * `preserve_transactions` - (Optional) Whether DMS saves the transaction order for a CDC load on the S3 target specified by `cdc_path`. Default is `false`. (Ignored for source endpoints.)
 * `rfc_4180` - (Optional) For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is `true`.
 * `row_group_length` - (Optional) Number of rows in a row group. (AWS default is `10000`.)
-* `server_side_encryption_kms_key_id` - (Optional) When `encryption_mode` is `SSE_KMS`, ARN for the AWS KMS key. (Ignored for source endpoints -- only `SSE_S3` is valid.)
+* `server_side_encryption_kms_key_id` - (Optional) When `encryption_mode` is `SSE_KMS`, ARN for the AWS KMS key. (Ignored for source endpoints -- only `SSE_S3` `encryption_mode` is valid.)
 * `ssl_mode` - (Optional) SSL mode to use for the connection. Valid values are `none`, `require`, `verify-ca`, `verify-full`. (AWS default is `none`.)
 * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
 * `timestamp_column_name` - (Optional) Column to add with timestamp information to the endpoint data for an Amazon S3 target.
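
In practice, the documentation change above means a source endpoint can simply omit the target-only arguments rather than expecting them to round-trip through state. The following is a minimal illustrative sketch, not taken from this commit: the endpoint ID, bucket, role reference, and table-definition file are placeholder assumptions.

```terraform
# Sketch of a source S3 endpoint. Arguments marked "(Ignored for source
# endpoints.)" above, such as compression_type, data_format, and
# date_partition_enabled, are intentionally omitted.
resource "aws_dms_s3_endpoint" "source_example" {
  endpoint_id   = "example-s3-source"
  endpoint_type = "source"
  bucket_name   = "example-dms-bucket"

  service_access_role_arn   = aws_iam_role.example.arn
  external_table_definition = file("${path.module}/table-definition.json")

  csv_delimiter     = ","
  csv_row_delimiter = "\n"
}
```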
