dms/s3_endpoint: Different attributes by region #28220

Merged 2 commits on Dec 6, 2022
3 changes: 3 additions & 0 deletions .changelog/28220.txt
@@ -0,0 +1,3 @@
```release-note:bug
resource/aws_dms_s3_endpoint: Fix disparate handling of endpoint attributes in different regions
```
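For context, the changelog entry describes source endpoints picking up spurious plan diffs because some regions' DescribeEndpoints responses return target-only attributes differently. A minimal sketch of the kind of configuration affected — the endpoint ID, bucket, and role ARN are placeholders, not values from this PR:

```terraform
# Hypothetical source endpoint; identifiers and the ARN are placeholders.
# Before this fix, target-only settings (e.g., compression_type) echoed
# back inconsistently across regions could surface as perpetual plan
# diffs on a source configuration like this.
resource "aws_dms_s3_endpoint" "source" {
  endpoint_id             = "example-source"
  endpoint_type           = "source"
  bucket_name             = "example-bucket"
  cdc_path                = "cdc/path"
  service_access_role_arn = "arn:aws:iam::123456789012:role/example-dms-access"
}
```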
35 changes: 19 additions & 16 deletions internal/service/dms/s3_endpoint.go
@@ -411,7 +411,6 @@ func resourceS3EndpointRead(d *schema.ResourceData, meta interface{}) error {

s3settings := endpoint.S3Settings
d.Set("add_column_name", s3settings.AddColumnName)
d.Set("add_trailing_padding_character", s3settings.AddTrailingPaddingCharacter)
d.Set("bucket_folder", s3settings.BucketFolder)
d.Set("bucket_name", s3settings.BucketName)
d.Set("canned_acl_for_objects", s3settings.CannedAclForObjects)
@@ -420,36 +419,40 @@ func resourceS3EndpointRead(d *schema.ResourceData, meta interface{}) error {
d.Set("cdc_max_batch_interval", s3settings.CdcMaxBatchInterval)
d.Set("cdc_min_file_size", s3settings.CdcMinFileSize)
d.Set("cdc_path", s3settings.CdcPath)
d.Set("compression_type", s3settings.CompressionType)
d.Set("csv_delimiter", s3settings.CsvDelimiter)
d.Set("csv_no_sup_value", s3settings.CsvNoSupValue)
d.Set("csv_null_value", s3settings.CsvNullValue)
d.Set("csv_row_delimiter", s3settings.CsvRowDelimiter)
d.Set("data_format", s3settings.DataFormat)
d.Set("data_page_size", s3settings.DataPageSize)
d.Set("date_partition_delimiter", strings.ToUpper(aws.StringValue(s3settings.DatePartitionDelimiter)))
d.Set("date_partition_enabled", s3settings.DatePartitionEnabled)
d.Set("date_partition_sequence", s3settings.DatePartitionSequence)
d.Set("date_partition_timezone", s3settings.DatePartitionTimezone)
d.Set("dict_page_size_limit", s3settings.DictPageSizeLimit)
d.Set("enable_statistics", s3settings.EnableStatistics)
d.Set("encoding_type", s3settings.EncodingType)
d.Set("encryption_mode", s3settings.EncryptionMode)
d.Set("expected_bucket_owner", s3settings.ExpectedBucketOwner)
d.Set("ignore_header_rows", s3settings.IgnoreHeaderRows)
d.Set("include_op_for_full_load", s3settings.IncludeOpForFullLoad)
d.Set("max_file_size", s3settings.MaxFileSize)
d.Set("parquet_timestamp_in_millisecond", s3settings.ParquetTimestampInMillisecond)
d.Set("parquet_version", s3settings.ParquetVersion)
d.Set("preserve_transactions", s3settings.PreserveTransactions)
d.Set("rfc_4180", s3settings.Rfc4180)
d.Set("row_group_length", s3settings.RowGroupLength)
d.Set("server_side_encryption_kms_key_id", s3settings.ServerSideEncryptionKmsKeyId)
d.Set("service_access_role_arn", s3settings.ServiceAccessRoleArn)
d.Set("timestamp_column_name", s3settings.TimestampColumnName)
d.Set("use_csv_no_sup_value", s3settings.UseCsvNoSupValue)
d.Set("use_task_start_time_for_full_load_timestamp", s3settings.UseTaskStartTimeForFullLoadTimestamp)

if d.Get("endpoint_type").(string) == dms.ReplicationEndpointTypeValueTarget {
d.Set("add_trailing_padding_character", s3settings.AddTrailingPaddingCharacter)
d.Set("compression_type", s3settings.CompressionType)
d.Set("csv_no_sup_value", s3settings.CsvNoSupValue)
d.Set("data_format", s3settings.DataFormat)
d.Set("date_partition_delimiter", strings.ToUpper(aws.StringValue(s3settings.DatePartitionDelimiter)))
d.Set("date_partition_enabled", s3settings.DatePartitionEnabled)
d.Set("date_partition_sequence", s3settings.DatePartitionSequence)
d.Set("date_partition_timezone", s3settings.DatePartitionTimezone)
d.Set("encryption_mode", s3settings.EncryptionMode)
d.Set("parquet_timestamp_in_millisecond", s3settings.ParquetTimestampInMillisecond)
d.Set("parquet_version", s3settings.ParquetVersion)
d.Set("preserve_transactions", s3settings.PreserveTransactions)
d.Set("server_side_encryption_kms_key_id", s3settings.ServerSideEncryptionKmsKeyId)
d.Set("use_csv_no_sup_value", s3settings.UseCsvNoSupValue)
}

p, err := structure.NormalizeJsonString(aws.StringValue(s3settings.ExternalTableDefinition))
if err != nil {
return create.Error(names.DMS, create.ErrActionSetting, ResNameS3Endpoint, d.Id(), err)
@@ -610,7 +613,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings {
s3s.CdcPath = aws.String(v.(string))
}

if v, ok := d.GetOk("compression_type"); ok { // likely only useful for target
if v, ok := d.GetOk("compression_type"); ok && target { // likely only useful for target
s3s.CompressionType = aws.String(v.(string))
}

@@ -642,7 +645,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings {
s3s.DatePartitionDelimiter = aws.String(v.(string))
}

if v, ok := d.Get("date_partition_enabled").(bool); ok { // likely only useful for target
if v, ok := d.Get("date_partition_enabled").(bool); ok && target { // likely only useful for target
s3s.DatePartitionEnabled = aws.Bool(v)
}

11 changes: 5 additions & 6 deletions internal/service/dms/s3_endpoint_test.go
@@ -292,9 +292,10 @@ func TestAccDMSS3Endpoint_sourceSimple(t *testing.T) {
PlanOnly: true,
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"compression_type", "date_partition_enabled", "parquet_timestamp_in_millisecond", "preserve_transactions", "use_csv_no_sup_value"},
},
},
})
@@ -330,7 +331,7 @@ func TestAccDMSS3Endpoint_source(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "cdc_inserts_and_updates", "true"),
resource.TestCheckResourceAttr(resourceName, "cdc_max_batch_interval", "100"),
resource.TestCheckResourceAttr(resourceName, "cdc_min_file_size", "16"),
resource.TestCheckResourceAttr(resourceName, "compression_type", "GZIP"),
resource.TestCheckResourceAttr(resourceName, "compression_type", "NONE"),
resource.TestCheckResourceAttr(resourceName, "csv_null_value", "?"),
resource.TestCheckResourceAttr(resourceName, "data_page_size", "1100000"),
resource.TestCheckResourceAttr(resourceName, "date_partition_enabled", "true"),
@@ -683,7 +684,6 @@ resource "aws_dms_s3_endpoint" "test" {
cdc_inserts_only = false
cdc_max_batch_interval = 100
cdc_min_file_size = 16
compression_type = "GZIP"
csv_null_value = "?"
data_page_size = 1100000
date_partition_enabled = true
@@ -740,7 +740,6 @@ resource "aws_dms_s3_endpoint" "test" {
cdc_inserts_only = true
cdc_max_batch_interval = 101
cdc_min_file_size = 17
compression_type = "NONE"
csv_null_value = "0"
data_page_size = 1000000
date_partition_enabled = false
6 changes: 3 additions & 3 deletions website/docs/r/dms_s3_endpoint.html.markdown
@@ -112,15 +112,15 @@ The following arguments are optional:
* `cdc_max_batch_interval` - (Optional) Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is `60`.)
* `cdc_min_file_size` - (Optional) Minimum file size, defined in kilobytes, to reach for a file output. (AWS default is 32 MB.)
* `certificate_arn` - (Optional, Default: empty string) ARN for the certificate.
* `compression_type` - (Optional) Set to compress target files. Valid values are `GZIP` and `NONE`. Default is `NONE`.
* `compression_type` - (Optional) Set to compress target files. Valid values are `GZIP` and `NONE`. Default is `NONE`. (Ignored for source endpoints.)
* `csv_delimiter` - (Optional) Delimiter used to separate columns in the source files. Default is `,`.
* `csv_no_sup_value` - (Optional) Only applies if output files for a CDC load are written in .csv format. If `use_csv_no_sup_value` is set to `true`, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of `use_csv_no_sup_value`. (Ignored for source endpoints.)
* `csv_null_value` - (Optional) String to use as null when writing to the target. (AWS default is `NULL`.)
* `csv_row_delimiter` - (Optional) Delimiter used to separate rows in the source files. Default is newline (_i.e._, `\n`).
* `data_format` - (Optional) Output format for the files that AWS DMS uses to create S3 objects. Valid values are `csv` and `parquet`. (Ignored for source endpoints -- only `csv` is valid.)
* `data_page_size` - (Optional) Size of one data page in bytes. (AWS default is 1 MiB, _i.e._, `1048576`.)
* `date_partition_delimiter` - (Optional) Date separating delimiter to use during folder partitioning. Valid values are `SLASH`, `UNDERSCORE`, `DASH`, and `NONE`. (AWS default is `SLASH`.) (Ignored for source endpoints.)
* `date_partition_enabled` - (Optional) Partition S3 bucket folders based on transaction commit dates. Default is `false`.
* `date_partition_enabled` - (Optional) Partition S3 bucket folders based on transaction commit dates. Default is `false`. (Ignored for source endpoints.)
* `date_partition_sequence` - (Optional) Date format to use during folder partitioning. Use this parameter when `date_partition_enabled` is set to true. Valid values are `YYYYMMDD`, `YYYYMMDDHH`, `YYYYMM`, `MMYYYYDD`, and `DDMMYYYY`. (AWS default is `YYYYMMDD`.) (Ignored for source endpoints.)
* `date_partition_timezone` - (Optional) Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (_e.g._, `Europe/Paris`). Use this when `date_partition_enabled` is `true`. (Ignored for source endpoints.)
* `dict_page_size_limit` - (Optional) Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, _i.e._, `1048576`.)
@@ -137,7 +137,7 @@ The following arguments are optional:
* `preserve_transactions` - (Optional) Whether DMS saves the transaction order for a CDC load on the S3 target specified by `cdc_path`. Default is `false`. (Ignored for source endpoints.)
* `rfc_4180` - (Optional) For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is `true`.
* `row_group_length` - (Optional) Number of rows in a row group. (AWS default is `10000`.)
* `server_side_encryption_kms_key_id` - (Optional) When `encryption_mode` is `SSE_KMS`, ARN for the AWS KMS key. (Ignored for source endpoints -- only `SSE_S3` is valid.)
* `server_side_encryption_kms_key_id` - (Optional) When `encryption_mode` is `SSE_KMS`, ARN for the AWS KMS key. (Ignored for source endpoints -- only `SSE_S3` `encryption_mode` is valid.)
* `ssl_mode` - (Optional) SSL mode to use for the connection. Valid values are `none`, `require`, `verify-ca`, `verify-full`. (AWS default is `none`.)
* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
* `timestamp_column_name` - (Optional) Column to add with timestamp information to the endpoint data for an Amazon S3 target.
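To make the source/target split in the argument list above concrete, here is a hedged sketch of a target endpoint exercising the target-only arguments — all identifiers, the role ARN, and the KMS key ARN are placeholders:

```terraform
# Hypothetical target endpoint; names, ARNs, and the key ID are placeholders.
resource "aws_dms_s3_endpoint" "target" {
  endpoint_id             = "example-target"
  endpoint_type           = "target"
  bucket_name             = "example-bucket"
  service_access_role_arn = "arn:aws:iam::123456789012:role/example-dms-access"

  # Target-only arguments; per the notes above, these are ignored (and,
  # after this PR, no longer read back into state) for source endpoints.
  compression_type                  = "GZIP"
  data_format                       = "parquet"
  parquet_version                   = "parquet-2-0"
  date_partition_enabled            = true
  date_partition_sequence           = "YYYYMMDDHH"
  encryption_mode                   = "SSE_KMS"
  server_side_encryption_kms_key_id = "arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab"
}
```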